1 // SPDX-License-Identifier: GPL-2.0-only
3 * Linux network driver for QLogic BR-series Converged Network Adapter.
6 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
7 * Copyright (c) 2014-2015 QLogic Corporation
14 ethport_can_be_up(struct bna_ethport
*ethport
)
17 if (ethport
->bna
->enet
.type
== BNA_ENET_T_REGULAR
)
18 ready
= ((ethport
->flags
& BNA_ETHPORT_F_ADMIN_UP
) &&
19 (ethport
->flags
& BNA_ETHPORT_F_RX_STARTED
) &&
20 (ethport
->flags
& BNA_ETHPORT_F_PORT_ENABLED
));
22 ready
= ((ethport
->flags
& BNA_ETHPORT_F_ADMIN_UP
) &&
23 (ethport
->flags
& BNA_ETHPORT_F_RX_STARTED
) &&
24 !(ethport
->flags
& BNA_ETHPORT_F_PORT_ENABLED
));
28 #define ethport_is_up ethport_can_be_up
/* Events handled by the ethport state machine.
 * NOTE(review): members 1-5 of each enum were reconstructed from the
 * driver's conventional event set — confirm against the full file.
 */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,
	ETHPORT_E_FWRESP_DOWN		= 7,
	ETHPORT_E_FWRESP_UP_FAIL	= 8,
};

/* Events handled by the enet state machine. */
enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,
	ENET_E_CHLD_STOPPED		= 7,
};

/* Events handled by the ioceth state machine. */
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
/* Copy one big-endian HW stats block into the host-order SW mirror.
 * NOTE: relies on count, i, stats_src and stats_dst being declared in
 * the invoking function's scope.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
72 * FW response handlers
76 bna_bfi_ethport_enable_aen(struct bna_ethport
*ethport
,
77 struct bfi_msgq_mhdr
*msghdr
)
79 ethport
->flags
|= BNA_ETHPORT_F_PORT_ENABLED
;
81 if (ethport_can_be_up(ethport
))
82 bfa_fsm_send_event(ethport
, ETHPORT_E_UP
);
86 bna_bfi_ethport_disable_aen(struct bna_ethport
*ethport
,
87 struct bfi_msgq_mhdr
*msghdr
)
89 int ethport_up
= ethport_is_up(ethport
);
91 ethport
->flags
&= ~BNA_ETHPORT_F_PORT_ENABLED
;
94 bfa_fsm_send_event(ethport
, ETHPORT_E_DOWN
);
98 bna_bfi_ethport_admin_rsp(struct bna_ethport
*ethport
,
99 struct bfi_msgq_mhdr
*msghdr
)
101 struct bfi_enet_enable_req
*admin_req
=
102 ðport
->bfi_enet_cmd
.admin_req
;
103 struct bfi_enet_rsp
*rsp
=
104 container_of(msghdr
, struct bfi_enet_rsp
, mh
);
106 switch (admin_req
->enable
) {
107 case BNA_STATUS_T_ENABLED
:
108 if (rsp
->error
== BFI_ENET_CMD_OK
)
109 bfa_fsm_send_event(ethport
, ETHPORT_E_FWRESP_UP_OK
);
111 ethport
->flags
&= ~BNA_ETHPORT_F_PORT_ENABLED
;
112 bfa_fsm_send_event(ethport
, ETHPORT_E_FWRESP_UP_FAIL
);
116 case BNA_STATUS_T_DISABLED
:
117 bfa_fsm_send_event(ethport
, ETHPORT_E_FWRESP_DOWN
);
118 ethport
->link_status
= BNA_LINK_DOWN
;
119 ethport
->link_cbfn(ethport
->bna
->bnad
, BNA_LINK_DOWN
);
125 bna_bfi_ethport_lpbk_rsp(struct bna_ethport
*ethport
,
126 struct bfi_msgq_mhdr
*msghdr
)
128 struct bfi_enet_diag_lb_req
*diag_lb_req
=
129 ðport
->bfi_enet_cmd
.lpbk_req
;
130 struct bfi_enet_rsp
*rsp
=
131 container_of(msghdr
, struct bfi_enet_rsp
, mh
);
133 switch (diag_lb_req
->enable
) {
134 case BNA_STATUS_T_ENABLED
:
135 if (rsp
->error
== BFI_ENET_CMD_OK
)
136 bfa_fsm_send_event(ethport
, ETHPORT_E_FWRESP_UP_OK
);
138 ethport
->flags
&= ~BNA_ETHPORT_F_ADMIN_UP
;
139 bfa_fsm_send_event(ethport
, ETHPORT_E_FWRESP_UP_FAIL
);
143 case BNA_STATUS_T_DISABLED
:
144 bfa_fsm_send_event(ethport
, ETHPORT_E_FWRESP_DOWN
);
150 bna_bfi_pause_set_rsp(struct bna_enet
*enet
, struct bfi_msgq_mhdr
*msghdr
)
152 bfa_fsm_send_event(enet
, ENET_E_FWRESP_PAUSE
);
156 bna_bfi_attr_get_rsp(struct bna_ioceth
*ioceth
,
157 struct bfi_msgq_mhdr
*msghdr
)
159 struct bfi_enet_attr_rsp
*rsp
=
160 container_of(msghdr
, struct bfi_enet_attr_rsp
, mh
);
163 * Store only if not set earlier, since BNAD can override the HW
166 if (!ioceth
->attr
.fw_query_complete
) {
167 ioceth
->attr
.num_txq
= ntohl(rsp
->max_cfg
);
168 ioceth
->attr
.num_rxp
= ntohl(rsp
->max_cfg
);
169 ioceth
->attr
.num_ucmac
= ntohl(rsp
->max_ucmac
);
170 ioceth
->attr
.num_mcmac
= BFI_ENET_MAX_MCAM
;
171 ioceth
->attr
.max_rit_size
= ntohl(rsp
->rit_size
);
172 ioceth
->attr
.fw_query_complete
= true;
175 bfa_fsm_send_event(ioceth
, IOCETH_E_ENET_ATTR_RESP
);
179 bna_bfi_stats_get_rsp(struct bna
*bna
, struct bfi_msgq_mhdr
*msghdr
)
181 struct bfi_enet_stats_req
*stats_req
= &bna
->stats_mod
.stats_get
;
184 u32 tx_enet_mask
= ntohl(stats_req
->tx_enet_mask
);
185 u32 rx_enet_mask
= ntohl(stats_req
->rx_enet_mask
);
189 bna_stats_copy(mac
, mac
);
190 bna_stats_copy(bpc
, bpc
);
191 bna_stats_copy(rad
, rad
);
192 bna_stats_copy(rlb
, rad
);
193 bna_stats_copy(fc_rx
, fc_rx
);
194 bna_stats_copy(fc_tx
, fc_tx
);
196 stats_src
= (u64
*)&(bna
->stats
.hw_stats_kva
->rxf_stats
[0]);
198 /* Copy Rxf stats to SW area, scatter them while copying */
199 for (i
= 0; i
< BFI_ENET_CFG_MAX
; i
++) {
200 stats_dst
= (u64
*)&(bna
->stats
.hw_stats
.rxf_stats
[i
]);
201 memset(stats_dst
, 0, sizeof(struct bfi_enet_stats_rxf
));
202 if (rx_enet_mask
& BIT(i
)) {
204 count
= sizeof(struct bfi_enet_stats_rxf
) /
206 for (k
= 0; k
< count
; k
++) {
207 stats_dst
[k
] = be64_to_cpu(*stats_src
);
213 /* Copy Txf stats to SW area, scatter them while copying */
214 for (i
= 0; i
< BFI_ENET_CFG_MAX
; i
++) {
215 stats_dst
= (u64
*)&(bna
->stats
.hw_stats
.txf_stats
[i
]);
216 memset(stats_dst
, 0, sizeof(struct bfi_enet_stats_txf
));
217 if (tx_enet_mask
& BIT(i
)) {
219 count
= sizeof(struct bfi_enet_stats_txf
) /
221 for (k
= 0; k
< count
; k
++) {
222 stats_dst
[k
] = be64_to_cpu(*stats_src
);
228 bna
->stats_mod
.stats_get_busy
= false;
229 bnad_cb_stats_get(bna
->bnad
, BNA_CB_SUCCESS
, &bna
->stats
);
233 bna_bfi_ethport_linkup_aen(struct bna_ethport
*ethport
,
234 struct bfi_msgq_mhdr
*msghdr
)
236 ethport
->link_status
= BNA_LINK_UP
;
238 /* Dispatch events */
239 ethport
->link_cbfn(ethport
->bna
->bnad
, ethport
->link_status
);
243 bna_bfi_ethport_linkdown_aen(struct bna_ethport
*ethport
,
244 struct bfi_msgq_mhdr
*msghdr
)
246 ethport
->link_status
= BNA_LINK_DOWN
;
248 /* Dispatch events */
249 ethport
->link_cbfn(ethport
->bna
->bnad
, BNA_LINK_DOWN
);
253 bna_err_handler(struct bna
*bna
, u32 intr_status
)
255 if (BNA_IS_HALT_INTR(bna
, intr_status
))
258 bfa_nw_ioc_error_isr(&bna
->ioceth
.ioc
);
262 bna_mbox_handler(struct bna
*bna
, u32 intr_status
)
264 if (BNA_IS_ERR_INTR(bna
, intr_status
)) {
265 bna_err_handler(bna
, intr_status
);
268 if (BNA_IS_MBOX_INTR(bna
, intr_status
))
269 bfa_nw_ioc_mbox_isr(&bna
->ioceth
.ioc
);
273 bna_msgq_rsp_handler(void *arg
, struct bfi_msgq_mhdr
*msghdr
)
275 struct bna
*bna
= (struct bna
*)arg
;
279 switch (msghdr
->msg_id
) {
280 case BFI_ENET_I2H_RX_CFG_SET_RSP
:
281 bna_rx_from_rid(bna
, msghdr
->enet_id
, rx
);
283 bna_bfi_rx_enet_start_rsp(rx
, msghdr
);
286 case BFI_ENET_I2H_RX_CFG_CLR_RSP
:
287 bna_rx_from_rid(bna
, msghdr
->enet_id
, rx
);
289 bna_bfi_rx_enet_stop_rsp(rx
, msghdr
);
292 case BFI_ENET_I2H_RIT_CFG_RSP
:
293 case BFI_ENET_I2H_RSS_CFG_RSP
:
294 case BFI_ENET_I2H_RSS_ENABLE_RSP
:
295 case BFI_ENET_I2H_RX_PROMISCUOUS_RSP
:
296 case BFI_ENET_I2H_RX_DEFAULT_RSP
:
297 case BFI_ENET_I2H_MAC_UCAST_CLR_RSP
:
298 case BFI_ENET_I2H_MAC_UCAST_ADD_RSP
:
299 case BFI_ENET_I2H_MAC_UCAST_DEL_RSP
:
300 case BFI_ENET_I2H_MAC_MCAST_DEL_RSP
:
301 case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP
:
302 case BFI_ENET_I2H_RX_VLAN_SET_RSP
:
303 case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP
:
304 bna_rx_from_rid(bna
, msghdr
->enet_id
, rx
);
306 bna_bfi_rxf_cfg_rsp(&rx
->rxf
, msghdr
);
309 case BFI_ENET_I2H_MAC_UCAST_SET_RSP
:
310 bna_rx_from_rid(bna
, msghdr
->enet_id
, rx
);
312 bna_bfi_rxf_ucast_set_rsp(&rx
->rxf
, msghdr
);
315 case BFI_ENET_I2H_MAC_MCAST_ADD_RSP
:
316 bna_rx_from_rid(bna
, msghdr
->enet_id
, rx
);
318 bna_bfi_rxf_mcast_add_rsp(&rx
->rxf
, msghdr
);
321 case BFI_ENET_I2H_TX_CFG_SET_RSP
:
322 bna_tx_from_rid(bna
, msghdr
->enet_id
, tx
);
324 bna_bfi_tx_enet_start_rsp(tx
, msghdr
);
327 case BFI_ENET_I2H_TX_CFG_CLR_RSP
:
328 bna_tx_from_rid(bna
, msghdr
->enet_id
, tx
);
330 bna_bfi_tx_enet_stop_rsp(tx
, msghdr
);
333 case BFI_ENET_I2H_PORT_ADMIN_RSP
:
334 bna_bfi_ethport_admin_rsp(&bna
->ethport
, msghdr
);
337 case BFI_ENET_I2H_DIAG_LOOPBACK_RSP
:
338 bna_bfi_ethport_lpbk_rsp(&bna
->ethport
, msghdr
);
341 case BFI_ENET_I2H_SET_PAUSE_RSP
:
342 bna_bfi_pause_set_rsp(&bna
->enet
, msghdr
);
345 case BFI_ENET_I2H_GET_ATTR_RSP
:
346 bna_bfi_attr_get_rsp(&bna
->ioceth
, msghdr
);
349 case BFI_ENET_I2H_STATS_GET_RSP
:
350 bna_bfi_stats_get_rsp(bna
, msghdr
);
353 case BFI_ENET_I2H_STATS_CLR_RSP
:
357 case BFI_ENET_I2H_LINK_UP_AEN
:
358 bna_bfi_ethport_linkup_aen(&bna
->ethport
, msghdr
);
361 case BFI_ENET_I2H_LINK_DOWN_AEN
:
362 bna_bfi_ethport_linkdown_aen(&bna
->ethport
, msghdr
);
365 case BFI_ENET_I2H_PORT_ENABLE_AEN
:
366 bna_bfi_ethport_enable_aen(&bna
->ethport
, msghdr
);
369 case BFI_ENET_I2H_PORT_DISABLE_AEN
:
370 bna_bfi_ethport_disable_aen(&bna
->ethport
, msghdr
);
373 case BFI_ENET_I2H_BW_UPDATE_AEN
:
374 bna_bfi_bw_update_aen(&bna
->tx_mod
);
/* Fire-and-clear the ethport stop callback (one-shot semantics). */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

/* Fire-and-clear the admin-up completion callback with @status. */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
405 bna_bfi_ethport_admin_up(struct bna_ethport
*ethport
)
407 struct bfi_enet_enable_req
*admin_up_req
=
408 ðport
->bfi_enet_cmd
.admin_req
;
410 bfi_msgq_mhdr_set(admin_up_req
->mh
, BFI_MC_ENET
,
411 BFI_ENET_H2I_PORT_ADMIN_UP_REQ
, 0, 0);
412 admin_up_req
->mh
.num_entries
= htons(
413 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req
)));
414 admin_up_req
->enable
= BNA_STATUS_T_ENABLED
;
416 bfa_msgq_cmd_set(ðport
->msgq_cmd
, NULL
, NULL
,
417 sizeof(struct bfi_enet_enable_req
), &admin_up_req
->mh
);
418 bfa_msgq_cmd_post(ðport
->bna
->msgq
, ðport
->msgq_cmd
);
422 bna_bfi_ethport_admin_down(struct bna_ethport
*ethport
)
424 struct bfi_enet_enable_req
*admin_down_req
=
425 ðport
->bfi_enet_cmd
.admin_req
;
427 bfi_msgq_mhdr_set(admin_down_req
->mh
, BFI_MC_ENET
,
428 BFI_ENET_H2I_PORT_ADMIN_UP_REQ
, 0, 0);
429 admin_down_req
->mh
.num_entries
= htons(
430 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req
)));
431 admin_down_req
->enable
= BNA_STATUS_T_DISABLED
;
433 bfa_msgq_cmd_set(ðport
->msgq_cmd
, NULL
, NULL
,
434 sizeof(struct bfi_enet_enable_req
), &admin_down_req
->mh
);
435 bfa_msgq_cmd_post(ðport
->bna
->msgq
, ðport
->msgq_cmd
);
439 bna_bfi_ethport_lpbk_up(struct bna_ethport
*ethport
)
441 struct bfi_enet_diag_lb_req
*lpbk_up_req
=
442 ðport
->bfi_enet_cmd
.lpbk_req
;
444 bfi_msgq_mhdr_set(lpbk_up_req
->mh
, BFI_MC_ENET
,
445 BFI_ENET_H2I_DIAG_LOOPBACK_REQ
, 0, 0);
446 lpbk_up_req
->mh
.num_entries
= htons(
447 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req
)));
448 lpbk_up_req
->mode
= (ethport
->bna
->enet
.type
==
449 BNA_ENET_T_LOOPBACK_INTERNAL
) ?
450 BFI_ENET_DIAG_LB_OPMODE_EXT
:
451 BFI_ENET_DIAG_LB_OPMODE_CBL
;
452 lpbk_up_req
->enable
= BNA_STATUS_T_ENABLED
;
454 bfa_msgq_cmd_set(ðport
->msgq_cmd
, NULL
, NULL
,
455 sizeof(struct bfi_enet_diag_lb_req
), &lpbk_up_req
->mh
);
456 bfa_msgq_cmd_post(ðport
->bna
->msgq
, ðport
->msgq_cmd
);
460 bna_bfi_ethport_lpbk_down(struct bna_ethport
*ethport
)
462 struct bfi_enet_diag_lb_req
*lpbk_down_req
=
463 ðport
->bfi_enet_cmd
.lpbk_req
;
465 bfi_msgq_mhdr_set(lpbk_down_req
->mh
, BFI_MC_ENET
,
466 BFI_ENET_H2I_DIAG_LOOPBACK_REQ
, 0, 0);
467 lpbk_down_req
->mh
.num_entries
= htons(
468 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req
)));
469 lpbk_down_req
->enable
= BNA_STATUS_T_DISABLED
;
471 bfa_msgq_cmd_set(ðport
->msgq_cmd
, NULL
, NULL
,
472 sizeof(struct bfi_enet_diag_lb_req
), &lpbk_down_req
->mh
);
473 bfa_msgq_cmd_post(ðport
->bna
->msgq
, ðport
->msgq_cmd
);
477 bna_bfi_ethport_up(struct bna_ethport
*ethport
)
479 if (ethport
->bna
->enet
.type
== BNA_ENET_T_REGULAR
)
480 bna_bfi_ethport_admin_up(ethport
);
482 bna_bfi_ethport_lpbk_up(ethport
);
486 bna_bfi_ethport_down(struct bna_ethport
*ethport
)
488 if (ethport
->bna
->enet
.type
== BNA_ENET_T_REGULAR
)
489 bna_bfi_ethport_admin_down(ethport
);
491 bna_bfi_ethport_lpbk_down(ethport
);
494 bfa_fsm_state_decl(bna_ethport
, stopped
, struct bna_ethport
,
495 enum bna_ethport_event
);
496 bfa_fsm_state_decl(bna_ethport
, down
, struct bna_ethport
,
497 enum bna_ethport_event
);
498 bfa_fsm_state_decl(bna_ethport
, up_resp_wait
, struct bna_ethport
,
499 enum bna_ethport_event
);
500 bfa_fsm_state_decl(bna_ethport
, down_resp_wait
, struct bna_ethport
,
501 enum bna_ethport_event
);
502 bfa_fsm_state_decl(bna_ethport
, up
, struct bna_ethport
,
503 enum bna_ethport_event
);
504 bfa_fsm_state_decl(bna_ethport
, last_resp_wait
, struct bna_ethport
,
505 enum bna_ethport_event
);
508 bna_ethport_sm_stopped_entry(struct bna_ethport
*ethport
)
510 call_ethport_stop_cbfn(ethport
);
514 bna_ethport_sm_stopped(struct bna_ethport
*ethport
,
515 enum bna_ethport_event event
)
518 case ETHPORT_E_START
:
519 bfa_fsm_set_state(ethport
, bna_ethport_sm_down
);
523 call_ethport_stop_cbfn(ethport
);
531 /* This event is received due to Rx objects failing */
541 bna_ethport_sm_down_entry(struct bna_ethport
*ethport
)
546 bna_ethport_sm_down(struct bna_ethport
*ethport
,
547 enum bna_ethport_event event
)
551 bfa_fsm_set_state(ethport
, bna_ethport_sm_stopped
);
555 bfa_fsm_set_state(ethport
, bna_ethport_sm_stopped
);
559 bfa_fsm_set_state(ethport
, bna_ethport_sm_up_resp_wait
);
560 bna_bfi_ethport_up(ethport
);
569 bna_ethport_sm_up_resp_wait_entry(struct bna_ethport
*ethport
)
574 bna_ethport_sm_up_resp_wait(struct bna_ethport
*ethport
,
575 enum bna_ethport_event event
)
579 bfa_fsm_set_state(ethport
, bna_ethport_sm_last_resp_wait
);
583 call_ethport_adminup_cbfn(ethport
, BNA_CB_FAIL
);
584 bfa_fsm_set_state(ethport
, bna_ethport_sm_stopped
);
588 call_ethport_adminup_cbfn(ethport
, BNA_CB_INTERRUPT
);
589 bfa_fsm_set_state(ethport
, bna_ethport_sm_down_resp_wait
);
592 case ETHPORT_E_FWRESP_UP_OK
:
593 call_ethport_adminup_cbfn(ethport
, BNA_CB_SUCCESS
);
594 bfa_fsm_set_state(ethport
, bna_ethport_sm_up
);
597 case ETHPORT_E_FWRESP_UP_FAIL
:
598 call_ethport_adminup_cbfn(ethport
, BNA_CB_FAIL
);
599 bfa_fsm_set_state(ethport
, bna_ethport_sm_down
);
602 case ETHPORT_E_FWRESP_DOWN
:
603 /* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
604 bna_bfi_ethport_up(ethport
);
613 bna_ethport_sm_down_resp_wait_entry(struct bna_ethport
*ethport
)
616 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
617 * mbox due to up_resp_wait -> down_resp_wait transition on event
623 bna_ethport_sm_down_resp_wait(struct bna_ethport
*ethport
,
624 enum bna_ethport_event event
)
628 bfa_fsm_set_state(ethport
, bna_ethport_sm_last_resp_wait
);
632 bfa_fsm_set_state(ethport
, bna_ethport_sm_stopped
);
636 bfa_fsm_set_state(ethport
, bna_ethport_sm_up_resp_wait
);
639 case ETHPORT_E_FWRESP_UP_OK
:
640 /* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
641 bna_bfi_ethport_down(ethport
);
644 case ETHPORT_E_FWRESP_UP_FAIL
:
645 case ETHPORT_E_FWRESP_DOWN
:
646 bfa_fsm_set_state(ethport
, bna_ethport_sm_down
);
655 bna_ethport_sm_up_entry(struct bna_ethport
*ethport
)
660 bna_ethport_sm_up(struct bna_ethport
*ethport
,
661 enum bna_ethport_event event
)
665 bfa_fsm_set_state(ethport
, bna_ethport_sm_last_resp_wait
);
666 bna_bfi_ethport_down(ethport
);
670 bfa_fsm_set_state(ethport
, bna_ethport_sm_stopped
);
674 bfa_fsm_set_state(ethport
, bna_ethport_sm_down_resp_wait
);
675 bna_bfi_ethport_down(ethport
);
684 bna_ethport_sm_last_resp_wait_entry(struct bna_ethport
*ethport
)
689 bna_ethport_sm_last_resp_wait(struct bna_ethport
*ethport
,
690 enum bna_ethport_event event
)
694 bfa_fsm_set_state(ethport
, bna_ethport_sm_stopped
);
699 * This event is received due to Rx objects stopping in
700 * parallel to ethport
705 case ETHPORT_E_FWRESP_UP_OK
:
706 /* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */
707 bna_bfi_ethport_down(ethport
);
710 case ETHPORT_E_FWRESP_UP_FAIL
:
711 case ETHPORT_E_FWRESP_DOWN
:
712 bfa_fsm_set_state(ethport
, bna_ethport_sm_stopped
);
721 bna_ethport_init(struct bna_ethport
*ethport
, struct bna
*bna
)
723 ethport
->flags
|= (BNA_ETHPORT_F_ADMIN_UP
| BNA_ETHPORT_F_PORT_ENABLED
);
726 ethport
->link_status
= BNA_LINK_DOWN
;
727 ethport
->link_cbfn
= bnad_cb_ethport_link_status
;
729 ethport
->rx_started_count
= 0;
731 ethport
->stop_cbfn
= NULL
;
732 ethport
->adminup_cbfn
= NULL
;
734 bfa_fsm_set_state(ethport
, bna_ethport_sm_stopped
);
738 bna_ethport_uninit(struct bna_ethport
*ethport
)
740 ethport
->flags
&= ~BNA_ETHPORT_F_ADMIN_UP
;
741 ethport
->flags
&= ~BNA_ETHPORT_F_PORT_ENABLED
;
747 bna_ethport_start(struct bna_ethport
*ethport
)
749 bfa_fsm_send_event(ethport
, ETHPORT_E_START
);
753 bna_enet_cb_ethport_stopped(struct bna_enet
*enet
)
755 bfa_wc_down(&enet
->chld_stop_wc
);
759 bna_ethport_stop(struct bna_ethport
*ethport
)
761 ethport
->stop_cbfn
= bna_enet_cb_ethport_stopped
;
762 bfa_fsm_send_event(ethport
, ETHPORT_E_STOP
);
766 bna_ethport_fail(struct bna_ethport
*ethport
)
768 /* Reset the physical port status to enabled */
769 ethport
->flags
|= BNA_ETHPORT_F_PORT_ENABLED
;
771 if (ethport
->link_status
!= BNA_LINK_DOWN
) {
772 ethport
->link_status
= BNA_LINK_DOWN
;
773 ethport
->link_cbfn(ethport
->bna
->bnad
, BNA_LINK_DOWN
);
775 bfa_fsm_send_event(ethport
, ETHPORT_E_FAIL
);
778 /* Should be called only when ethport is disabled */
780 bna_ethport_cb_rx_started(struct bna_ethport
*ethport
)
782 ethport
->rx_started_count
++;
784 if (ethport
->rx_started_count
== 1) {
785 ethport
->flags
|= BNA_ETHPORT_F_RX_STARTED
;
787 if (ethport_can_be_up(ethport
))
788 bfa_fsm_send_event(ethport
, ETHPORT_E_UP
);
793 bna_ethport_cb_rx_stopped(struct bna_ethport
*ethport
)
795 int ethport_up
= ethport_is_up(ethport
);
797 ethport
->rx_started_count
--;
799 if (ethport
->rx_started_count
== 0) {
800 ethport
->flags
&= ~BNA_ETHPORT_F_RX_STARTED
;
803 bfa_fsm_send_event(ethport
, ETHPORT_E_DOWN
);
/* Start ethport, Tx and Rx children with types derived from enet type. */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop all children; one wait-counter ref per child being stopped. */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped,	\
		    (enet));						\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

/* Propagate an IOC failure to all children. */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)

/* Restart only the Rx path (used after an MTU reconfiguration). */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop only the Rx path and wait for it via the child-stop counter. */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped,	\
		    (enet));						\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

/* Fire-and-clear the enet stop callback (one-shot semantics). */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Fire-and-clear the MTU-set completion callback. */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
889 static void bna_enet_cb_chld_stopped(void *arg
);
890 static void bna_bfi_pause_set(struct bna_enet
*enet
);
892 bfa_fsm_state_decl(bna_enet
, stopped
, struct bna_enet
,
893 enum bna_enet_event
);
894 bfa_fsm_state_decl(bna_enet
, pause_init_wait
, struct bna_enet
,
895 enum bna_enet_event
);
896 bfa_fsm_state_decl(bna_enet
, last_resp_wait
, struct bna_enet
,
897 enum bna_enet_event
);
898 bfa_fsm_state_decl(bna_enet
, started
, struct bna_enet
,
899 enum bna_enet_event
);
900 bfa_fsm_state_decl(bna_enet
, cfg_wait
, struct bna_enet
,
901 enum bna_enet_event
);
902 bfa_fsm_state_decl(bna_enet
, cfg_stop_wait
, struct bna_enet
,
903 enum bna_enet_event
);
904 bfa_fsm_state_decl(bna_enet
, chld_stop_wait
, struct bna_enet
,
905 enum bna_enet_event
);
908 bna_enet_sm_stopped_entry(struct bna_enet
*enet
)
910 call_enet_mtu_cbfn(enet
);
911 call_enet_stop_cbfn(enet
);
915 bna_enet_sm_stopped(struct bna_enet
*enet
, enum bna_enet_event event
)
919 bfa_fsm_set_state(enet
, bna_enet_sm_pause_init_wait
);
923 call_enet_stop_cbfn(enet
);
930 case ENET_E_PAUSE_CFG
:
934 call_enet_mtu_cbfn(enet
);
937 case ENET_E_CHLD_STOPPED
:
939 * This event is received due to Ethport, Tx and Rx objects
951 bna_enet_sm_pause_init_wait_entry(struct bna_enet
*enet
)
953 bna_bfi_pause_set(enet
);
957 bna_enet_sm_pause_init_wait(struct bna_enet
*enet
,
958 enum bna_enet_event event
)
962 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
963 bfa_fsm_set_state(enet
, bna_enet_sm_last_resp_wait
);
967 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
968 bfa_fsm_set_state(enet
, bna_enet_sm_stopped
);
971 case ENET_E_PAUSE_CFG
:
972 enet
->flags
|= BNA_ENET_F_PAUSE_CHANGED
;
979 case ENET_E_FWRESP_PAUSE
:
980 if (enet
->flags
& BNA_ENET_F_PAUSE_CHANGED
) {
981 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
982 bna_bfi_pause_set(enet
);
984 bfa_fsm_set_state(enet
, bna_enet_sm_started
);
985 bna_enet_chld_start(enet
);
995 bna_enet_sm_last_resp_wait_entry(struct bna_enet
*enet
)
997 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
1001 bna_enet_sm_last_resp_wait(struct bna_enet
*enet
,
1002 enum bna_enet_event event
)
1006 case ENET_E_FWRESP_PAUSE
:
1007 bfa_fsm_set_state(enet
, bna_enet_sm_stopped
);
1011 bfa_sm_fault(event
);
1016 bna_enet_sm_started_entry(struct bna_enet
*enet
)
1019 * NOTE: Do not call bna_enet_chld_start() here, since it will be
1020 * inadvertently called during cfg_wait->started transition as well
1022 call_enet_mtu_cbfn(enet
);
1026 bna_enet_sm_started(struct bna_enet
*enet
,
1027 enum bna_enet_event event
)
1031 bfa_fsm_set_state(enet
, bna_enet_sm_chld_stop_wait
);
1035 bfa_fsm_set_state(enet
, bna_enet_sm_stopped
);
1036 bna_enet_chld_fail(enet
);
1039 case ENET_E_PAUSE_CFG
:
1040 bfa_fsm_set_state(enet
, bna_enet_sm_cfg_wait
);
1041 bna_bfi_pause_set(enet
);
1044 case ENET_E_MTU_CFG
:
1045 bfa_fsm_set_state(enet
, bna_enet_sm_cfg_wait
);
1046 bna_enet_rx_stop(enet
);
1050 bfa_sm_fault(event
);
1055 bna_enet_sm_cfg_wait_entry(struct bna_enet
*enet
)
1060 bna_enet_sm_cfg_wait(struct bna_enet
*enet
,
1061 enum bna_enet_event event
)
1065 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
1066 enet
->flags
&= ~BNA_ENET_F_MTU_CHANGED
;
1067 bfa_fsm_set_state(enet
, bna_enet_sm_cfg_stop_wait
);
1071 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
1072 enet
->flags
&= ~BNA_ENET_F_MTU_CHANGED
;
1073 bfa_fsm_set_state(enet
, bna_enet_sm_stopped
);
1074 bna_enet_chld_fail(enet
);
1077 case ENET_E_PAUSE_CFG
:
1078 enet
->flags
|= BNA_ENET_F_PAUSE_CHANGED
;
1081 case ENET_E_MTU_CFG
:
1082 enet
->flags
|= BNA_ENET_F_MTU_CHANGED
;
1085 case ENET_E_CHLD_STOPPED
:
1086 bna_enet_rx_start(enet
);
1088 case ENET_E_FWRESP_PAUSE
:
1089 if (enet
->flags
& BNA_ENET_F_PAUSE_CHANGED
) {
1090 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
1091 bna_bfi_pause_set(enet
);
1092 } else if (enet
->flags
& BNA_ENET_F_MTU_CHANGED
) {
1093 enet
->flags
&= ~BNA_ENET_F_MTU_CHANGED
;
1094 bna_enet_rx_stop(enet
);
1096 bfa_fsm_set_state(enet
, bna_enet_sm_started
);
1101 bfa_sm_fault(event
);
1106 bna_enet_sm_cfg_stop_wait_entry(struct bna_enet
*enet
)
1108 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
1109 enet
->flags
&= ~BNA_ENET_F_MTU_CHANGED
;
1113 bna_enet_sm_cfg_stop_wait(struct bna_enet
*enet
,
1114 enum bna_enet_event event
)
1118 bfa_fsm_set_state(enet
, bna_enet_sm_stopped
);
1119 bna_enet_chld_fail(enet
);
1122 case ENET_E_FWRESP_PAUSE
:
1123 case ENET_E_CHLD_STOPPED
:
1124 bfa_fsm_set_state(enet
, bna_enet_sm_chld_stop_wait
);
1128 bfa_sm_fault(event
);
1133 bna_enet_sm_chld_stop_wait_entry(struct bna_enet
*enet
)
1135 bna_enet_chld_stop(enet
);
1139 bna_enet_sm_chld_stop_wait(struct bna_enet
*enet
,
1140 enum bna_enet_event event
)
1144 bfa_fsm_set_state(enet
, bna_enet_sm_stopped
);
1145 bna_enet_chld_fail(enet
);
1148 case ENET_E_CHLD_STOPPED
:
1149 bfa_fsm_set_state(enet
, bna_enet_sm_stopped
);
1153 bfa_sm_fault(event
);
1158 bna_bfi_pause_set(struct bna_enet
*enet
)
1160 struct bfi_enet_set_pause_req
*pause_req
= &enet
->pause_req
;
1162 bfi_msgq_mhdr_set(pause_req
->mh
, BFI_MC_ENET
,
1163 BFI_ENET_H2I_SET_PAUSE_REQ
, 0, 0);
1164 pause_req
->mh
.num_entries
= htons(
1165 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req
)));
1166 pause_req
->tx_pause
= enet
->pause_config
.tx_pause
;
1167 pause_req
->rx_pause
= enet
->pause_config
.rx_pause
;
1169 bfa_msgq_cmd_set(&enet
->msgq_cmd
, NULL
, NULL
,
1170 sizeof(struct bfi_enet_set_pause_req
), &pause_req
->mh
);
1171 bfa_msgq_cmd_post(&enet
->bna
->msgq
, &enet
->msgq_cmd
);
1175 bna_enet_cb_chld_stopped(void *arg
)
1177 struct bna_enet
*enet
= (struct bna_enet
*)arg
;
1179 bfa_fsm_send_event(enet
, ENET_E_CHLD_STOPPED
);
1183 bna_enet_init(struct bna_enet
*enet
, struct bna
*bna
)
1188 enet
->type
= BNA_ENET_T_REGULAR
;
1190 enet
->stop_cbfn
= NULL
;
1191 enet
->stop_cbarg
= NULL
;
1193 enet
->mtu_cbfn
= NULL
;
1195 bfa_fsm_set_state(enet
, bna_enet_sm_stopped
);
1199 bna_enet_uninit(struct bna_enet
*enet
)
1207 bna_enet_start(struct bna_enet
*enet
)
1209 enet
->flags
|= BNA_ENET_F_IOCETH_READY
;
1210 if (enet
->flags
& BNA_ENET_F_ENABLED
)
1211 bfa_fsm_send_event(enet
, ENET_E_START
);
1215 bna_ioceth_cb_enet_stopped(void *arg
)
1217 struct bna_ioceth
*ioceth
= (struct bna_ioceth
*)arg
;
1219 bfa_fsm_send_event(ioceth
, IOCETH_E_ENET_STOPPED
);
1223 bna_enet_stop(struct bna_enet
*enet
)
1225 enet
->stop_cbfn
= bna_ioceth_cb_enet_stopped
;
1226 enet
->stop_cbarg
= &enet
->bna
->ioceth
;
1228 enet
->flags
&= ~BNA_ENET_F_IOCETH_READY
;
1229 bfa_fsm_send_event(enet
, ENET_E_STOP
);
1233 bna_enet_fail(struct bna_enet
*enet
)
1235 enet
->flags
&= ~BNA_ENET_F_IOCETH_READY
;
1236 bfa_fsm_send_event(enet
, ENET_E_FAIL
);
1240 bna_enet_cb_tx_stopped(struct bna_enet
*enet
)
1242 bfa_wc_down(&enet
->chld_stop_wc
);
1246 bna_enet_cb_rx_stopped(struct bna_enet
*enet
)
1248 bfa_wc_down(&enet
->chld_stop_wc
);
1252 bna_enet_mtu_get(struct bna_enet
*enet
)
1258 bna_enet_enable(struct bna_enet
*enet
)
1260 if (enet
->fsm
!= (bfa_sm_t
)bna_enet_sm_stopped
)
1263 enet
->flags
|= BNA_ENET_F_ENABLED
;
1265 if (enet
->flags
& BNA_ENET_F_IOCETH_READY
)
1266 bfa_fsm_send_event(enet
, ENET_E_START
);
1270 bna_enet_disable(struct bna_enet
*enet
, enum bna_cleanup_type type
,
1271 void (*cbfn
)(void *))
1273 if (type
== BNA_SOFT_CLEANUP
) {
1274 (*cbfn
)(enet
->bna
->bnad
);
1278 enet
->stop_cbfn
= cbfn
;
1279 enet
->stop_cbarg
= enet
->bna
->bnad
;
1281 enet
->flags
&= ~BNA_ENET_F_ENABLED
;
1283 bfa_fsm_send_event(enet
, ENET_E_STOP
);
1287 bna_enet_pause_config(struct bna_enet
*enet
,
1288 struct bna_pause_config
*pause_config
)
1290 enet
->pause_config
= *pause_config
;
1292 bfa_fsm_send_event(enet
, ENET_E_PAUSE_CFG
);
1296 bna_enet_mtu_set(struct bna_enet
*enet
, int mtu
,
1297 void (*cbfn
)(struct bnad
*))
1301 enet
->mtu_cbfn
= cbfn
;
1303 bfa_fsm_send_event(enet
, ENET_E_MTU_CFG
);
1307 bna_enet_perm_mac_get(struct bna_enet
*enet
, u8
*mac
)
1309 bfa_nw_ioc_get_mac(&enet
->bna
->ioceth
.ioc
, mac
);
/* Acknowledge any pending status and enable mailbox interrupts. */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

/* Disable mailbox interrupts (HW first, then the bnad layer). */
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

/* Fire-and-clear the ioceth stop callback (one-shot semantics). */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Stats module lifecycle helpers: gate stats requests on IOC readiness. */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
1362 static void bna_bfi_attr_get(struct bna_ioceth
*ioceth
);
1364 bfa_fsm_state_decl(bna_ioceth
, stopped
, struct bna_ioceth
,
1365 enum bna_ioceth_event
);
1366 bfa_fsm_state_decl(bna_ioceth
, ioc_ready_wait
, struct bna_ioceth
,
1367 enum bna_ioceth_event
);
1368 bfa_fsm_state_decl(bna_ioceth
, enet_attr_wait
, struct bna_ioceth
,
1369 enum bna_ioceth_event
);
1370 bfa_fsm_state_decl(bna_ioceth
, ready
, struct bna_ioceth
,
1371 enum bna_ioceth_event
);
1372 bfa_fsm_state_decl(bna_ioceth
, last_resp_wait
, struct bna_ioceth
,
1373 enum bna_ioceth_event
);
1374 bfa_fsm_state_decl(bna_ioceth
, enet_stop_wait
, struct bna_ioceth
,
1375 enum bna_ioceth_event
);
1376 bfa_fsm_state_decl(bna_ioceth
, ioc_disable_wait
, struct bna_ioceth
,
1377 enum bna_ioceth_event
);
1378 bfa_fsm_state_decl(bna_ioceth
, failed
, struct bna_ioceth
,
1379 enum bna_ioceth_event
);
1382 bna_ioceth_sm_stopped_entry(struct bna_ioceth
*ioceth
)
1384 call_ioceth_stop_cbfn(ioceth
);
1388 bna_ioceth_sm_stopped(struct bna_ioceth
*ioceth
,
1389 enum bna_ioceth_event event
)
1392 case IOCETH_E_ENABLE
:
1393 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_ioc_ready_wait
);
1394 bfa_nw_ioc_enable(&ioceth
->ioc
);
1397 case IOCETH_E_DISABLE
:
1398 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_stopped
);
1401 case IOCETH_E_IOC_RESET
:
1402 enable_mbox_intr(ioceth
);
1405 case IOCETH_E_IOC_FAILED
:
1406 disable_mbox_intr(ioceth
);
1407 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_failed
);
1411 bfa_sm_fault(event
);
1416 bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth
*ioceth
)
1419 * Do not call bfa_nw_ioc_enable() here. It must be called in the
1420 * previous state due to failed -> ioc_ready_wait transition.
1425 bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth
*ioceth
,
1426 enum bna_ioceth_event event
)
1429 case IOCETH_E_DISABLE
:
1430 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_ioc_disable_wait
);
1431 bfa_nw_ioc_disable(&ioceth
->ioc
);
1434 case IOCETH_E_IOC_RESET
:
1435 enable_mbox_intr(ioceth
);
1438 case IOCETH_E_IOC_FAILED
:
1439 disable_mbox_intr(ioceth
);
1440 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_failed
);
1443 case IOCETH_E_IOC_READY
:
1444 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_enet_attr_wait
);
1448 bfa_sm_fault(event
);
1453 bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth
*ioceth
)
1455 bna_bfi_attr_get(ioceth
);
1459 bna_ioceth_sm_enet_attr_wait(struct bna_ioceth
*ioceth
,
1460 enum bna_ioceth_event event
)
1463 case IOCETH_E_DISABLE
:
1464 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_last_resp_wait
);
1467 case IOCETH_E_IOC_FAILED
:
1468 disable_mbox_intr(ioceth
);
1469 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_failed
);
1472 case IOCETH_E_ENET_ATTR_RESP
:
1473 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_ready
);
1477 bfa_sm_fault(event
);
1482 bna_ioceth_sm_ready_entry(struct bna_ioceth
*ioceth
)
1484 bna_enet_start(&ioceth
->bna
->enet
);
1485 bna_stats_mod_start(&ioceth
->bna
->stats_mod
);
1486 bnad_cb_ioceth_ready(ioceth
->bna
->bnad
);
1490 bna_ioceth_sm_ready(struct bna_ioceth
*ioceth
, enum bna_ioceth_event event
)
1493 case IOCETH_E_DISABLE
:
1494 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_enet_stop_wait
);
1497 case IOCETH_E_IOC_FAILED
:
1498 disable_mbox_intr(ioceth
);
1499 bna_enet_fail(&ioceth
->bna
->enet
);
1500 bna_stats_mod_fail(&ioceth
->bna
->stats_mod
);
1501 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_failed
);
1505 bfa_sm_fault(event
);
/* No entry action: just waiting for the last FW response. */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}
1515 bna_ioceth_sm_last_resp_wait(struct bna_ioceth
*ioceth
,
1516 enum bna_ioceth_event event
)
1519 case IOCETH_E_IOC_FAILED
:
1520 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_ioc_disable_wait
);
1521 disable_mbox_intr(ioceth
);
1522 bfa_nw_ioc_disable(&ioceth
->ioc
);
1525 case IOCETH_E_ENET_ATTR_RESP
:
1526 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_ioc_disable_wait
);
1527 bfa_nw_ioc_disable(&ioceth
->ioc
);
1531 bfa_sm_fault(event
);
1536 bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth
*ioceth
)
1538 bna_stats_mod_stop(&ioceth
->bna
->stats_mod
);
1539 bna_enet_stop(&ioceth
->bna
->enet
);
1543 bna_ioceth_sm_enet_stop_wait(struct bna_ioceth
*ioceth
,
1544 enum bna_ioceth_event event
)
1547 case IOCETH_E_IOC_FAILED
:
1548 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_ioc_disable_wait
);
1549 disable_mbox_intr(ioceth
);
1550 bna_enet_fail(&ioceth
->bna
->enet
);
1551 bna_stats_mod_fail(&ioceth
->bna
->stats_mod
);
1552 bfa_nw_ioc_disable(&ioceth
->ioc
);
1555 case IOCETH_E_ENET_STOPPED
:
1556 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_ioc_disable_wait
);
1557 bfa_nw_ioc_disable(&ioceth
->ioc
);
1561 bfa_sm_fault(event
);
/* No entry action: waiting for the IOC disable to complete. */
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}
1571 bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth
*ioceth
,
1572 enum bna_ioceth_event event
)
1575 case IOCETH_E_IOC_DISABLED
:
1576 disable_mbox_intr(ioceth
);
1577 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_stopped
);
1580 case IOCETH_E_ENET_STOPPED
:
1581 /* This event is received due to enet failing */
1586 bfa_sm_fault(event
);
1591 bna_ioceth_sm_failed_entry(struct bna_ioceth
*ioceth
)
1593 bnad_cb_ioceth_failed(ioceth
->bna
->bnad
);
1597 bna_ioceth_sm_failed(struct bna_ioceth
*ioceth
,
1598 enum bna_ioceth_event event
)
1601 case IOCETH_E_DISABLE
:
1602 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_ioc_disable_wait
);
1603 bfa_nw_ioc_disable(&ioceth
->ioc
);
1606 case IOCETH_E_IOC_RESET
:
1607 enable_mbox_intr(ioceth
);
1608 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_ioc_ready_wait
);
1611 case IOCETH_E_IOC_FAILED
:
1615 bfa_sm_fault(event
);
1620 bna_bfi_attr_get(struct bna_ioceth
*ioceth
)
1622 struct bfi_enet_attr_req
*attr_req
= &ioceth
->attr_req
;
1624 bfi_msgq_mhdr_set(attr_req
->mh
, BFI_MC_ENET
,
1625 BFI_ENET_H2I_GET_ATTR_REQ
, 0, 0);
1626 attr_req
->mh
.num_entries
= htons(
1627 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req
)));
1628 bfa_msgq_cmd_set(&ioceth
->msgq_cmd
, NULL
, NULL
,
1629 sizeof(struct bfi_enet_attr_req
), &attr_req
->mh
);
1630 bfa_msgq_cmd_post(&ioceth
->bna
->msgq
, &ioceth
->msgq_cmd
);
1633 /* IOC callback functions */
1636 bna_cb_ioceth_enable(void *arg
, enum bfa_status error
)
1638 struct bna_ioceth
*ioceth
= (struct bna_ioceth
*)arg
;
1641 bfa_fsm_send_event(ioceth
, IOCETH_E_IOC_FAILED
);
1643 bfa_fsm_send_event(ioceth
, IOCETH_E_IOC_READY
);
1647 bna_cb_ioceth_disable(void *arg
)
1649 struct bna_ioceth
*ioceth
= (struct bna_ioceth
*)arg
;
1651 bfa_fsm_send_event(ioceth
, IOCETH_E_IOC_DISABLED
);
1655 bna_cb_ioceth_hbfail(void *arg
)
1657 struct bna_ioceth
*ioceth
= (struct bna_ioceth
*)arg
;
1659 bfa_fsm_send_event(ioceth
, IOCETH_E_IOC_FAILED
);
1663 bna_cb_ioceth_reset(void *arg
)
1665 struct bna_ioceth
*ioceth
= (struct bna_ioceth
*)arg
;
1667 bfa_fsm_send_event(ioceth
, IOCETH_E_IOC_RESET
);
1670 static struct bfa_ioc_cbfn bna_ioceth_cbfn
= {
1671 .enable_cbfn
= bna_cb_ioceth_enable
,
1672 .disable_cbfn
= bna_cb_ioceth_disable
,
1673 .hbfail_cbfn
= bna_cb_ioceth_hbfail
,
1674 .reset_cbfn
= bna_cb_ioceth_reset
1677 static void bna_attr_init(struct bna_ioceth
*ioceth
)
1679 ioceth
->attr
.num_txq
= BFI_ENET_DEF_TXQ
;
1680 ioceth
->attr
.num_rxp
= BFI_ENET_DEF_RXP
;
1681 ioceth
->attr
.num_ucmac
= BFI_ENET_DEF_UCAM
;
1682 ioceth
->attr
.num_mcmac
= BFI_ENET_MAX_MCAM
;
1683 ioceth
->attr
.max_rit_size
= BFI_ENET_DEF_RITSZ
;
1684 ioceth
->attr
.fw_query_complete
= false;
1688 bna_ioceth_init(struct bna_ioceth
*ioceth
, struct bna
*bna
,
1689 struct bna_res_info
*res_info
)
1697 * Attach IOC and claim:
1698 * 1. DMA memory for IOC attributes
1699 * 2. Kernel memory for FW trace
1701 bfa_nw_ioc_attach(&ioceth
->ioc
, ioceth
, &bna_ioceth_cbfn
);
1702 bfa_nw_ioc_pci_init(&ioceth
->ioc
, &bna
->pcidev
, BFI_PCIFN_CLASS_ETH
);
1705 &res_info
[BNA_RES_MEM_T_ATTR
].res_u
.mem_info
.mdl
[0].dma
, dma
);
1706 kva
= res_info
[BNA_RES_MEM_T_ATTR
].res_u
.mem_info
.mdl
[0].kva
;
1707 bfa_nw_ioc_mem_claim(&ioceth
->ioc
, kva
, dma
);
1709 kva
= res_info
[BNA_RES_MEM_T_FWTRC
].res_u
.mem_info
.mdl
[0].kva
;
1710 bfa_nw_ioc_debug_memclaim(&ioceth
->ioc
, kva
);
1713 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1717 &res_info
[BNA_RES_MEM_T_COM
].res_u
.mem_info
.mdl
[0].dma
, dma
);
1718 kva
= res_info
[BNA_RES_MEM_T_COM
].res_u
.mem_info
.mdl
[0].kva
;
1719 bfa_nw_cee_attach(&bna
->cee
, &ioceth
->ioc
, bna
);
1720 bfa_nw_cee_mem_claim(&bna
->cee
, kva
, dma
);
1721 kva
+= bfa_nw_cee_meminfo();
1722 dma
+= bfa_nw_cee_meminfo();
1724 bfa_nw_flash_attach(&bna
->flash
, &ioceth
->ioc
, bna
);
1725 bfa_nw_flash_memclaim(&bna
->flash
, kva
, dma
);
1726 kva
+= bfa_nw_flash_meminfo();
1727 dma
+= bfa_nw_flash_meminfo();
1729 bfa_msgq_attach(&bna
->msgq
, &ioceth
->ioc
);
1730 bfa_msgq_memclaim(&bna
->msgq
, kva
, dma
);
1731 bfa_msgq_regisr(&bna
->msgq
, BFI_MC_ENET
, bna_msgq_rsp_handler
, bna
);
1732 kva
+= bfa_msgq_meminfo();
1733 dma
+= bfa_msgq_meminfo();
1735 ioceth
->stop_cbfn
= NULL
;
1736 ioceth
->stop_cbarg
= NULL
;
1738 bna_attr_init(ioceth
);
1740 bfa_fsm_set_state(ioceth
, bna_ioceth_sm_stopped
);
1744 bna_ioceth_uninit(struct bna_ioceth
*ioceth
)
1746 bfa_nw_ioc_detach(&ioceth
->ioc
);
1752 bna_ioceth_enable(struct bna_ioceth
*ioceth
)
1754 if (ioceth
->fsm
== (bfa_fsm_t
)bna_ioceth_sm_ready
) {
1755 bnad_cb_ioceth_ready(ioceth
->bna
->bnad
);
1759 if (ioceth
->fsm
== (bfa_fsm_t
)bna_ioceth_sm_stopped
)
1760 bfa_fsm_send_event(ioceth
, IOCETH_E_ENABLE
);
1764 bna_ioceth_disable(struct bna_ioceth
*ioceth
, enum bna_cleanup_type type
)
1766 if (type
== BNA_SOFT_CLEANUP
) {
1767 bnad_cb_ioceth_disabled(ioceth
->bna
->bnad
);
1771 ioceth
->stop_cbfn
= bnad_cb_ioceth_disabled
;
1772 ioceth
->stop_cbarg
= ioceth
->bna
->bnad
;
1774 bfa_fsm_send_event(ioceth
, IOCETH_E_DISABLE
);
1778 bna_ucam_mod_init(struct bna_ucam_mod
*ucam_mod
, struct bna
*bna
,
1779 struct bna_res_info
*res_info
)
1783 ucam_mod
->ucmac
= (struct bna_mac
*)
1784 res_info
[BNA_MOD_RES_MEM_T_UCMAC_ARRAY
].res_u
.mem_info
.mdl
[0].kva
;
1786 INIT_LIST_HEAD(&ucam_mod
->free_q
);
1787 for (i
= 0; i
< bna
->ioceth
.attr
.num_ucmac
; i
++)
1788 list_add_tail(&ucam_mod
->ucmac
[i
].qe
, &ucam_mod
->free_q
);
1790 /* A separate queue to allow synchronous setting of a list of MACs */
1791 INIT_LIST_HEAD(&ucam_mod
->del_q
);
1792 for (; i
< (bna
->ioceth
.attr
.num_ucmac
* 2); i
++)
1793 list_add_tail(&ucam_mod
->ucmac
[i
].qe
, &ucam_mod
->del_q
);
1795 ucam_mod
->bna
= bna
;
1799 bna_ucam_mod_uninit(struct bna_ucam_mod
*ucam_mod
)
1801 ucam_mod
->bna
= NULL
;
1805 bna_mcam_mod_init(struct bna_mcam_mod
*mcam_mod
, struct bna
*bna
,
1806 struct bna_res_info
*res_info
)
1810 mcam_mod
->mcmac
= (struct bna_mac
*)
1811 res_info
[BNA_MOD_RES_MEM_T_MCMAC_ARRAY
].res_u
.mem_info
.mdl
[0].kva
;
1813 INIT_LIST_HEAD(&mcam_mod
->free_q
);
1814 for (i
= 0; i
< bna
->ioceth
.attr
.num_mcmac
; i
++)
1815 list_add_tail(&mcam_mod
->mcmac
[i
].qe
, &mcam_mod
->free_q
);
1817 mcam_mod
->mchandle
= (struct bna_mcam_handle
*)
1818 res_info
[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY
].res_u
.mem_info
.mdl
[0].kva
;
1820 INIT_LIST_HEAD(&mcam_mod
->free_handle_q
);
1821 for (i
= 0; i
< bna
->ioceth
.attr
.num_mcmac
; i
++)
1822 list_add_tail(&mcam_mod
->mchandle
[i
].qe
,
1823 &mcam_mod
->free_handle_q
);
1825 /* A separate queue to allow synchronous setting of a list of MACs */
1826 INIT_LIST_HEAD(&mcam_mod
->del_q
);
1827 for (; i
< (bna
->ioceth
.attr
.num_mcmac
* 2); i
++)
1828 list_add_tail(&mcam_mod
->mcmac
[i
].qe
, &mcam_mod
->del_q
);
1830 mcam_mod
->bna
= bna
;
1834 bna_mcam_mod_uninit(struct bna_mcam_mod
*mcam_mod
)
1836 mcam_mod
->bna
= NULL
;
1840 bna_bfi_stats_get(struct bna
*bna
)
1842 struct bfi_enet_stats_req
*stats_req
= &bna
->stats_mod
.stats_get
;
1844 bna
->stats_mod
.stats_get_busy
= true;
1846 bfi_msgq_mhdr_set(stats_req
->mh
, BFI_MC_ENET
,
1847 BFI_ENET_H2I_STATS_GET_REQ
, 0, 0);
1848 stats_req
->mh
.num_entries
= htons(
1849 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req
)));
1850 stats_req
->stats_mask
= htons(BFI_ENET_STATS_ALL
);
1851 stats_req
->tx_enet_mask
= htonl(bna
->tx_mod
.rid_mask
);
1852 stats_req
->rx_enet_mask
= htonl(bna
->rx_mod
.rid_mask
);
1853 stats_req
->host_buffer
.a32
.addr_hi
= bna
->stats
.hw_stats_dma
.msb
;
1854 stats_req
->host_buffer
.a32
.addr_lo
= bna
->stats
.hw_stats_dma
.lsb
;
1856 bfa_msgq_cmd_set(&bna
->stats_mod
.stats_get_cmd
, NULL
, NULL
,
1857 sizeof(struct bfi_enet_stats_req
), &stats_req
->mh
);
1858 bfa_msgq_cmd_post(&bna
->msgq
, &bna
->stats_mod
.stats_get_cmd
);
1862 bna_res_req(struct bna_res_info
*res_info
)
1864 /* DMA memory for COMMON_MODULE */
1865 res_info
[BNA_RES_MEM_T_COM
].res_type
= BNA_RES_T_MEM
;
1866 res_info
[BNA_RES_MEM_T_COM
].res_u
.mem_info
.mem_type
= BNA_MEM_T_DMA
;
1867 res_info
[BNA_RES_MEM_T_COM
].res_u
.mem_info
.num
= 1;
1868 res_info
[BNA_RES_MEM_T_COM
].res_u
.mem_info
.len
= ALIGN(
1869 (bfa_nw_cee_meminfo() +
1870 bfa_nw_flash_meminfo() +
1871 bfa_msgq_meminfo()), PAGE_SIZE
);
1873 /* DMA memory for retrieving IOC attributes */
1874 res_info
[BNA_RES_MEM_T_ATTR
].res_type
= BNA_RES_T_MEM
;
1875 res_info
[BNA_RES_MEM_T_ATTR
].res_u
.mem_info
.mem_type
= BNA_MEM_T_DMA
;
1876 res_info
[BNA_RES_MEM_T_ATTR
].res_u
.mem_info
.num
= 1;
1877 res_info
[BNA_RES_MEM_T_ATTR
].res_u
.mem_info
.len
=
1878 ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE
);
1880 /* Virtual memory for retreiving fw_trc */
1881 res_info
[BNA_RES_MEM_T_FWTRC
].res_type
= BNA_RES_T_MEM
;
1882 res_info
[BNA_RES_MEM_T_FWTRC
].res_u
.mem_info
.mem_type
= BNA_MEM_T_KVA
;
1883 res_info
[BNA_RES_MEM_T_FWTRC
].res_u
.mem_info
.num
= 1;
1884 res_info
[BNA_RES_MEM_T_FWTRC
].res_u
.mem_info
.len
= BNA_DBG_FWTRC_LEN
;
1886 /* DMA memory for retreiving stats */
1887 res_info
[BNA_RES_MEM_T_STATS
].res_type
= BNA_RES_T_MEM
;
1888 res_info
[BNA_RES_MEM_T_STATS
].res_u
.mem_info
.mem_type
= BNA_MEM_T_DMA
;
1889 res_info
[BNA_RES_MEM_T_STATS
].res_u
.mem_info
.num
= 1;
1890 res_info
[BNA_RES_MEM_T_STATS
].res_u
.mem_info
.len
=
1891 ALIGN(sizeof(struct bfi_enet_stats
),
1896 bna_mod_res_req(struct bna
*bna
, struct bna_res_info
*res_info
)
1898 struct bna_attr
*attr
= &bna
->ioceth
.attr
;
1900 /* Virtual memory for Tx objects - stored by Tx module */
1901 res_info
[BNA_MOD_RES_MEM_T_TX_ARRAY
].res_type
= BNA_RES_T_MEM
;
1902 res_info
[BNA_MOD_RES_MEM_T_TX_ARRAY
].res_u
.mem_info
.mem_type
=
1904 res_info
[BNA_MOD_RES_MEM_T_TX_ARRAY
].res_u
.mem_info
.num
= 1;
1905 res_info
[BNA_MOD_RES_MEM_T_TX_ARRAY
].res_u
.mem_info
.len
=
1906 attr
->num_txq
* sizeof(struct bna_tx
);
1908 /* Virtual memory for TxQ - stored by Tx module */
1909 res_info
[BNA_MOD_RES_MEM_T_TXQ_ARRAY
].res_type
= BNA_RES_T_MEM
;
1910 res_info
[BNA_MOD_RES_MEM_T_TXQ_ARRAY
].res_u
.mem_info
.mem_type
=
1912 res_info
[BNA_MOD_RES_MEM_T_TXQ_ARRAY
].res_u
.mem_info
.num
= 1;
1913 res_info
[BNA_MOD_RES_MEM_T_TXQ_ARRAY
].res_u
.mem_info
.len
=
1914 attr
->num_txq
* sizeof(struct bna_txq
);
1916 /* Virtual memory for Rx objects - stored by Rx module */
1917 res_info
[BNA_MOD_RES_MEM_T_RX_ARRAY
].res_type
= BNA_RES_T_MEM
;
1918 res_info
[BNA_MOD_RES_MEM_T_RX_ARRAY
].res_u
.mem_info
.mem_type
=
1920 res_info
[BNA_MOD_RES_MEM_T_RX_ARRAY
].res_u
.mem_info
.num
= 1;
1921 res_info
[BNA_MOD_RES_MEM_T_RX_ARRAY
].res_u
.mem_info
.len
=
1922 attr
->num_rxp
* sizeof(struct bna_rx
);
1924 /* Virtual memory for RxPath - stored by Rx module */
1925 res_info
[BNA_MOD_RES_MEM_T_RXP_ARRAY
].res_type
= BNA_RES_T_MEM
;
1926 res_info
[BNA_MOD_RES_MEM_T_RXP_ARRAY
].res_u
.mem_info
.mem_type
=
1928 res_info
[BNA_MOD_RES_MEM_T_RXP_ARRAY
].res_u
.mem_info
.num
= 1;
1929 res_info
[BNA_MOD_RES_MEM_T_RXP_ARRAY
].res_u
.mem_info
.len
=
1930 attr
->num_rxp
* sizeof(struct bna_rxp
);
1932 /* Virtual memory for RxQ - stored by Rx module */
1933 res_info
[BNA_MOD_RES_MEM_T_RXQ_ARRAY
].res_type
= BNA_RES_T_MEM
;
1934 res_info
[BNA_MOD_RES_MEM_T_RXQ_ARRAY
].res_u
.mem_info
.mem_type
=
1936 res_info
[BNA_MOD_RES_MEM_T_RXQ_ARRAY
].res_u
.mem_info
.num
= 1;
1937 res_info
[BNA_MOD_RES_MEM_T_RXQ_ARRAY
].res_u
.mem_info
.len
=
1938 (attr
->num_rxp
* 2) * sizeof(struct bna_rxq
);
1940 /* Virtual memory for Unicast MAC address - stored by ucam module */
1941 res_info
[BNA_MOD_RES_MEM_T_UCMAC_ARRAY
].res_type
= BNA_RES_T_MEM
;
1942 res_info
[BNA_MOD_RES_MEM_T_UCMAC_ARRAY
].res_u
.mem_info
.mem_type
=
1944 res_info
[BNA_MOD_RES_MEM_T_UCMAC_ARRAY
].res_u
.mem_info
.num
= 1;
1945 res_info
[BNA_MOD_RES_MEM_T_UCMAC_ARRAY
].res_u
.mem_info
.len
=
1946 (attr
->num_ucmac
* 2) * sizeof(struct bna_mac
);
1948 /* Virtual memory for Multicast MAC address - stored by mcam module */
1949 res_info
[BNA_MOD_RES_MEM_T_MCMAC_ARRAY
].res_type
= BNA_RES_T_MEM
;
1950 res_info
[BNA_MOD_RES_MEM_T_MCMAC_ARRAY
].res_u
.mem_info
.mem_type
=
1952 res_info
[BNA_MOD_RES_MEM_T_MCMAC_ARRAY
].res_u
.mem_info
.num
= 1;
1953 res_info
[BNA_MOD_RES_MEM_T_MCMAC_ARRAY
].res_u
.mem_info
.len
=
1954 (attr
->num_mcmac
* 2) * sizeof(struct bna_mac
);
1956 /* Virtual memory for Multicast handle - stored by mcam module */
1957 res_info
[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY
].res_type
= BNA_RES_T_MEM
;
1958 res_info
[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY
].res_u
.mem_info
.mem_type
=
1960 res_info
[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY
].res_u
.mem_info
.num
= 1;
1961 res_info
[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY
].res_u
.mem_info
.len
=
1962 attr
->num_mcmac
* sizeof(struct bna_mcam_handle
);
1966 bna_init(struct bna
*bna
, struct bnad
*bnad
,
1967 struct bfa_pcidev
*pcidev
, struct bna_res_info
*res_info
)
1970 bna
->pcidev
= *pcidev
;
1972 bna
->stats
.hw_stats_kva
= (struct bfi_enet_stats
*)
1973 res_info
[BNA_RES_MEM_T_STATS
].res_u
.mem_info
.mdl
[0].kva
;
1974 bna
->stats
.hw_stats_dma
.msb
=
1975 res_info
[BNA_RES_MEM_T_STATS
].res_u
.mem_info
.mdl
[0].dma
.msb
;
1976 bna
->stats
.hw_stats_dma
.lsb
=
1977 res_info
[BNA_RES_MEM_T_STATS
].res_u
.mem_info
.mdl
[0].dma
.lsb
;
1979 bna_reg_addr_init(bna
, &bna
->pcidev
);
1981 /* Also initializes diag, cee, sfp, phy_port, msgq */
1982 bna_ioceth_init(&bna
->ioceth
, bna
, res_info
);
1984 bna_enet_init(&bna
->enet
, bna
);
1985 bna_ethport_init(&bna
->ethport
, bna
);
1989 bna_mod_init(struct bna
*bna
, struct bna_res_info
*res_info
)
1991 bna_tx_mod_init(&bna
->tx_mod
, bna
, res_info
);
1993 bna_rx_mod_init(&bna
->rx_mod
, bna
, res_info
);
1995 bna_ucam_mod_init(&bna
->ucam_mod
, bna
, res_info
);
1997 bna_mcam_mod_init(&bna
->mcam_mod
, bna
, res_info
);
1999 bna
->default_mode_rid
= BFI_INVALID_RID
;
2000 bna
->promisc_rid
= BFI_INVALID_RID
;
2002 bna
->mod_flags
|= BNA_MOD_F_INIT_DONE
;
2006 bna_uninit(struct bna
*bna
)
2008 if (bna
->mod_flags
& BNA_MOD_F_INIT_DONE
) {
2009 bna_mcam_mod_uninit(&bna
->mcam_mod
);
2010 bna_ucam_mod_uninit(&bna
->ucam_mod
);
2011 bna_rx_mod_uninit(&bna
->rx_mod
);
2012 bna_tx_mod_uninit(&bna
->tx_mod
);
2013 bna
->mod_flags
&= ~BNA_MOD_F_INIT_DONE
;
2016 bna_stats_mod_uninit(&bna
->stats_mod
);
2017 bna_ethport_uninit(&bna
->ethport
);
2018 bna_enet_uninit(&bna
->enet
);
2020 bna_ioceth_uninit(&bna
->ioceth
);
2026 bna_num_txq_set(struct bna
*bna
, int num_txq
)
2028 if (bna
->ioceth
.attr
.fw_query_complete
&&
2029 (num_txq
<= bna
->ioceth
.attr
.num_txq
)) {
2030 bna
->ioceth
.attr
.num_txq
= num_txq
;
2031 return BNA_CB_SUCCESS
;
2038 bna_num_rxp_set(struct bna
*bna
, int num_rxp
)
2040 if (bna
->ioceth
.attr
.fw_query_complete
&&
2041 (num_rxp
<= bna
->ioceth
.attr
.num_rxp
)) {
2042 bna
->ioceth
.attr
.num_rxp
= num_rxp
;
2043 return BNA_CB_SUCCESS
;
2050 bna_cam_mod_mac_get(struct list_head
*head
)
2052 struct bna_mac
*mac
;
2054 mac
= list_first_entry_or_null(head
, struct bna_mac
, qe
);
2061 struct bna_mcam_handle
*
2062 bna_mcam_mod_handle_get(struct bna_mcam_mod
*mcam_mod
)
2064 struct bna_mcam_handle
*handle
;
2066 handle
= list_first_entry_or_null(&mcam_mod
->free_handle_q
,
2067 struct bna_mcam_handle
, qe
);
2069 list_del(&handle
->qe
);
2075 bna_mcam_mod_handle_put(struct bna_mcam_mod
*mcam_mod
,
2076 struct bna_mcam_handle
*handle
)
2078 list_add_tail(&handle
->qe
, &mcam_mod
->free_handle_q
);
2082 bna_hw_stats_get(struct bna
*bna
)
2084 if (!bna
->stats_mod
.ioc_ready
) {
2085 bnad_cb_stats_get(bna
->bnad
, BNA_CB_FAIL
, &bna
->stats
);
2088 if (bna
->stats_mod
.stats_get_busy
) {
2089 bnad_cb_stats_get(bna
->bnad
, BNA_CB_BUSY
, &bna
->stats
);
2093 bna_bfi_stats_get(bna
);