/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
static inline int
ethport_can_be_up(struct bna_ethport *ethport)
{
	int ready = 0;

	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	else
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	return ready;
}
#define ethport_is_up ethport_can_be_up
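/*
 * Events posted to the ethport, enet and ioceth state machines below.
 * In each enum the higher values carry firmware responses, the lower
 * ones driver-side start/stop/fail and configuration triggers.
 */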
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,
	ETHPORT_E_FWRESP_DOWN		= 7,
	ETHPORT_E_FWRESP_UP_FAIL	= 8,
};

enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,
	ENET_E_CHLD_STOPPED		= 7,
};
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
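/*
 * Copies one big-endian HW stats block into the host-endian SW mirror.
 * Expands inside bna_bfi_stats_get_rsp() and relies on its local
 * variables count, i, stats_src and stats_dst.
 */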
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
/*
 * FW response handlers
 */
static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}
static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}
static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}
static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i, k;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}
static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(bna, intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(bna, intr_status))
		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
}
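/*
 * Demultiplexes firmware message-queue responses and AENs by msg_id and
 * forwards each one to the owning Tx/Rx/ethport/enet/ioceth object.
 */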
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
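/*
 * One-shot callback helpers: each clears the saved callback pointer
 * before invoking it, so a callback may safely re-arm itself.
 */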
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
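/*
 * A regular enet brings the port up/down with the admin request; a
 * loopback enet uses the diagnostic loopback request instead.
 */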
static void
bna_bfi_ethport_up(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_up(ethport);
	else
		bna_bfi_ethport_lpbk_up(ethport);
}
static void
bna_bfi_ethport_down(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_down(ethport);
	else
		bna_bfi_ethport_lpbk_down(ethport);
}
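/*
 * Ethport state machine: stopped, down, up_resp_wait, down_resp_wait,
 * up and last_resp_wait, driven by the ETHPORT_E_* events above.
 */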
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}
static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}
static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}
static void
bna_ethport_uninit(struct bna_ethport *ethport)
{
	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	ethport->bna = NULL;
}
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
/* Should be called only when ethport is disabled */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}
void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}
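/*
 * Child start/stop helpers: enet's children are the ethport, the Tx
 * module and the Rx module. Stop uses a wait counter (bfa_wc) that is
 * bumped once per child and resolved as each child reports stopped.
 */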
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);
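/*
 * Enet state machine: stopped, pause_init_wait, last_resp_wait, started,
 * cfg_wait, cfg_stop_wait and chld_stop_wait, driven by ENET_E_* events.
 */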
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}
static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}
static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}
static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}
static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}
static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}
static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}
static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}
static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}
static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;

	enet->bna = NULL;
}
static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;
	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}
static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}
static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}
void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}
void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}
void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
static void bna_bfi_attr_get(struct bna_ioceth *ioceth);
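/*
 * IOC-ethernet (ioceth) state machine: stopped, ioc_ready_wait,
 * enet_attr_wait, ready, last_resp_wait, enet_stop_wait,
 * ioc_disable_wait and failed, driven by IOCETH_E_* events.
 */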
bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}
static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}
static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
/* IOC callback functions */

static void
bna_cb_ioceth_enable(void *arg, enum bfa_status error)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	if (error)
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
	else
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
}
static void
bna_cb_ioceth_disable(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
}
static void
bna_cb_ioceth_hbfail(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
}
static void
bna_cb_ioceth_reset(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
}
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	bna_cb_ioceth_enable,
	bna_cb_ioceth_disable,
	bna_cb_ioceth_hbfail,
	bna_cb_ioceth_reset
};
static void bna_attr_init(struct bna_ioceth *ioceth)
{
	ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
	ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
	ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
	ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
	ioceth->attr.fw_query_complete = false;
}
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
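/*
 * The CAM module MAC arrays are sized at twice the negotiated count:
 * the first half seeds free_q, the second half seeds del_q, which
 * allows synchronous setting of a list of MACs while deletes pend.
 */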
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
		res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	/* A separate queue to allow synchronous setting of a list of MACs */
	INIT_LIST_HEAD(&ucam_mod->del_q);
	for (; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
	}

	ucam_mod->bna = bna;
}
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &ucam_mod->free_q)
		i++;

	i = 0;
	list_for_each(qe, &ucam_mod->del_q)
		i++;

	ucam_mod->bna = NULL;
}
static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
		res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->mchandle = (struct bna_mcam_handle *)
		res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
		list_add_tail(&mcam_mod->mchandle[i].qe,
			&mcam_mod->free_handle_q);
	}

	/* A separate queue to allow synchronous setting of a list of MACs */
	INIT_LIST_HEAD(&mcam_mod->del_q);
	for (; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
	}

	mcam_mod->bna = bna;
}
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &mcam_mod->free_q) i++;

	i = 0;
	list_for_each(qe, &mcam_mod->del_q) i++;

	i = 0;
	list_for_each(qe, &mcam_mod->free_handle_q) i++;

	mcam_mod->bna = NULL;
}
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
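/*
 * Resource requirement tables: bna_res_req() fills the fixed DMA/KVA
 * needs known before attach; bna_mod_res_req() sizes the per-object
 * arrays from the attributes negotiated with firmware.
 */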
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
		(bfa_nw_cee_meminfo() +
		 bfa_nw_flash_meminfo() +
		 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
		ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(sizeof(struct bfi_enet_stats),
			PAGE_SIZE);
}
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
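/*
 * The following setters let BNAD trim the firmware-reported queue
 * counts downward after the attribute query has completed.
 */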
enum bna_cb_status
bna_num_txq_set(struct bna *bna, int num_txq)
{
	if (bna->ioceth.attr.fw_query_complete &&
		(num_txq <= bna->ioceth.attr.num_txq)) {
		bna->ioceth.attr.num_txq = num_txq;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}
enum bna_cb_status
bna_num_rxp_set(struct bna *bna, int num_rxp)
{
	if (bna->ioceth.attr.fw_query_complete &&
		(num_rxp <= bna->ioceth.attr.num_rxp)) {
		bna->ioceth.attr.num_rxp = num_rxp;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}
struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
	struct list_head *qe;

	if (list_empty(head))
		return NULL;

	bfa_q_deq(head, &qe);
	return (struct bna_mac *)qe;
}
void
bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, tail);
}
struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_handle_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_handle_q, &qe);

	return (struct bna_mcam_handle *)qe;
}
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}