/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 */
#include "bna.h"
static int
ethport_can_be_up(struct bna_ethport *ethport)
{
	int ready = 0;

	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	else
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	return ready;
}

#define ethport_is_up ethport_can_be_up
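/*
 * ethport_is_up deliberately aliases ethport_can_be_up: the predicate that
 * says the port may transition up is the same one that, once the FSM has
 * reached the up state, says it is still up. Callers that need the "was it
 * up before this change" answer sample it first; for example,
 * bna_bfi_ethport_disable_aen() below latches ethport_is_up() before
 * clearing BNA_ETHPORT_F_PORT_ENABLED.
 */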
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,
	ETHPORT_E_FWRESP_DOWN		= 7,
	ETHPORT_E_FWRESP_UP_FAIL	= 8,
};

enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,
	ENET_E_CHLD_STOPPED		= 7,
};

enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
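/*
 * bna_stats_copy() is intentionally unhygienic: it expects "count", "i",
 * "stats_src", "stats_dst" and "bna" to exist in the caller's scope, and
 * token-pastes _name/_type to select the matching firmware (big-endian)
 * source block and host destination block. For example, the expansion of
 * bna_stats_copy(mac, mac) in bna_bfi_stats_get_rsp() is effectively:
 *
 *	count = sizeof(struct bfi_enet_stats_mac) / sizeof(u64);
 *	stats_src = (u64 *)&bna->stats.hw_stats_kva->mac_stats;
 *	stats_dst = (u64 *)&bna->stats.hw_stats.mac_stats;
 *	for (i = 0; i < count; i++)
 *		stats_dst[i] = be64_to_cpu(stats_src[i]);
 */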
/* FW response handlers */

static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}

static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}
static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}
static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp =
		container_of(msghdr, struct bfi_enet_rsp, mh);

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp =
		container_of(msghdr, struct bfi_enet_attr_rsp, mh);

	/*
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i, k;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & BIT(i)) {
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & BIT(i)) {
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
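/*
 * The firmware packs Rxf/Txf stat blocks back to back, only for the
 * functions selected in the request masks, while the host-side arrays are
 * indexed by function id. Hence the scatter above: every destination slot
 * is zeroed, and a single big-endian source cursor advances only through
 * the slots whose mask bit is set.
 */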
static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}

static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}

void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(bna, intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(bna, intr_status))
		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
}
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
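/*
 * Every BFI_MC_ENET message from the firmware funnels through the handler
 * above, registered via bfa_msgq_regisr() in bna_ioceth_init(). Responses
 * are demultiplexed on msg_id plus the enet_id carried in the message
 * header; bna_rx_from_rid()/bna_tx_from_rid() are lookup helpers that
 * store the matching object (or NULL) into rx/tx, which is why each
 * per-object case checks the pointer before dispatching.
 */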
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}

static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
static void
bna_bfi_ethport_up(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_up(ethport);
	else
		bna_bfi_ethport_lpbk_up(ethport);
}

static void
bna_bfi_ethport_down(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_down(ethport);
	else
		bna_bfi_ethport_lpbk_down(ethport);
}
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
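/*
 * bfa_fsm_state_decl() pastes the module, state, object and event types
 * into forward declarations for a state handler and its entry hook. A
 * sketch of what the first declaration above expands to, assuming the
 * stock bfa_cs.h macro:
 *
 *	static void bna_ethport_sm_stopped_entry(struct bna_ethport *);
 *	static void bna_ethport_sm_stopped(struct bna_ethport *,
 *			enum bna_ethport_event);
 *
 * bfa_fsm_set_state() stores the handler pointer and runs the entry hook;
 * bfa_fsm_send_event() invokes whichever handler is current.
 */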
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/*
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/*
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}

static void
bna_ethport_uninit(struct bna_ethport *ethport)
{
	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	ethport->bna = NULL;
}

static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}

static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}

/* Should be called only when ethport is disabled */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}

void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
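/*
 * bna_enet_chld_stop() rendezvouses three asynchronous stops with a bfa
 * wait counter (assuming the stock bfa_cs.h semantics): bfa_wc_init()
 * takes an initial reference and records the resume callback, each
 * bfa_wc_up() takes one more before kicking a child, each child's stop
 * callback (bna_enet_cb_ethport_stopped(), bna_enet_cb_tx_stopped(),
 * bna_enet_cb_rx_stopped()) drops one via bfa_wc_down(), and
 * bfa_wc_wait() drops the initial reference. bna_enet_cb_chld_stopped()
 * therefore fires exactly once, after all children have stopped.
 */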
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)

#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		/* No-op */
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/*
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/*
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
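/*
 * All of the bna_bfi_*() request builders in this file follow the same
 * mailbox-queue recipe seen above: stamp the header for the ENET message
 * class, record how many queue entries the request occupies, fill the
 * request body, then queue it with bfa_msgq_cmd_set()/bfa_msgq_cmd_post().
 * The two NULL arguments to bfa_msgq_cmd_set() are the optional completion
 * callback and its argument; completions are instead routed through
 * bna_msgq_rsp_handler().
 */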
static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}

static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}

static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;

	enet->bna = NULL;
}

static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;
	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}

static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}

static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}

static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}

void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}

void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}

void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}

void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config)
{
	enet->pause_config = *pause_config;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}

void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}

void
bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
{
	bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
}
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}

static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/*
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}

static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}

static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
/* IOC callback functions */

static void
bna_cb_ioceth_enable(void *arg, enum bfa_status error)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	if (error)
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
	else
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
}

static void
bna_cb_ioceth_disable(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
}

static void
bna_cb_ioceth_hbfail(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
}

static void
bna_cb_ioceth_reset(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
}

static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	bna_cb_ioceth_enable,
	bna_cb_ioceth_disable,
	bna_cb_ioceth_hbfail,
	bna_cb_ioceth_reset
};
static void bna_attr_init(struct bna_ioceth *ioceth)
{
	ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
	ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
	ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
	ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
	ioceth->attr.fw_query_complete = false;
}
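/*
 * These defaults only hold until the GET_ATTR response arrives:
 * bna_bfi_attr_get_rsp() overwrites them with the firmware-reported
 * limits exactly once. The fw_query_complete flag then protects values
 * that the BNAD layer may subsequently trim through bna_num_txq_set() /
 * bna_num_rxp_set() from being clobbered by a later query.
 */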
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/*
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/*
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}

void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}

void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);

	/* A separate queue to allow synchronous setting of a list of MACs */
	INIT_LIST_HEAD(&ucam_mod->del_q);
	for (; i < (bna->ioceth.attr.num_ucmac * 2); i++)
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);

	ucam_mod->bna = bna;
}
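/*
 * The ucmac array is sized at twice attr.num_ucmac (see bna_mod_res_req()
 * below): the first half seeds free_q for normal allocation, the second
 * half seeds del_q so that a whole list of MACs can be staged for deletion
 * without waiting for free_q entries to be returned. bna_mcam_mod_init()
 * below applies the same split to the multicast array.
 */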
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	ucam_mod->bna = NULL;
}
static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);

	mcam_mod->mchandle = (struct bna_mcam_handle *)
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
		list_add_tail(&mcam_mod->mchandle[i].qe,
			      &mcam_mod->free_handle_q);

	/* A separate queue to allow synchronous setting of a list of MACs */
	INIT_LIST_HEAD(&mcam_mod->del_q);
	for (; i < (bna->ioceth.attr.num_mcmac * 2); i++)
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);

	mcam_mod->bna = bna;
}
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	mcam_mod->bna = NULL;
}
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
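/*
 * Stats collection is asynchronous and single-flight: stats_get_busy
 * rejects overlapping requests (see bna_hw_stats_get() at the end of this
 * file), the firmware DMAs raw big-endian counters into the buffer
 * described by hw_stats_dma, and bna_bfi_stats_get_rsp() byte-swaps and
 * scatters them before clearing the busy flag.
 */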
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats),
					PAGE_SIZE);
}
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
void
bna_init(struct bna *bna, struct bnad *bnad,
	 struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}

void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
int
bna_num_txq_set(struct bna *bna, int num_txq)
{
	if (bna->ioceth.attr.fw_query_complete &&
		(num_txq <= bna->ioceth.attr.num_txq)) {
		bna->ioceth.attr.num_txq = num_txq;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}

int
bna_num_rxp_set(struct bna *bna, int num_rxp)
{
	if (bna->ioceth.attr.fw_query_complete &&
		(num_rxp <= bna->ioceth.attr.num_rxp)) {
		bna->ioceth.attr.num_rxp = num_rxp;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}
struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
	struct bna_mac *mac;

	mac = list_first_entry_or_null(head, struct bna_mac, qe);
	if (mac)
		list_del(&mac->qe);

	return mac;
}

struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct bna_mcam_handle *handle;

	handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
					  struct bna_mcam_handle, qe);
	if (handle)
		list_del(&handle->qe);

	return handle;
}

void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}

void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}