/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */
#include "bna.h"
static inline int
ethport_can_be_up(struct bna_ethport *ethport)
{
	int ready = 0;
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	else
		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
			 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
	return ready;
}

#define ethport_is_up ethport_can_be_up
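/*
 * Summary of the readiness rules above: a regular enet port needs
 * ADMIN_UP, RX_STARTED and PORT_ENABLED all set; a loopback port needs
 * ADMIN_UP and RX_STARTED with PORT_ENABLED clear.  ethport_is_up is a
 * deliberate alias - the same predicate doubles as the "is currently up"
 * check in the disable AEN and Rx stop callbacks below.
 */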
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,
	ETHPORT_E_FWRESP_DOWN		= 7,
	ETHPORT_E_FWRESP_UP_FAIL	= 8,
};

enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,
	ENET_E_CHLD_STOPPED		= 7,
};

enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
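/*
 * bna_stats_copy() is not hygienic: it expects "bna", "count", "i",
 * "stats_src" and "stats_dst" to already be in scope at the expansion
 * site, as they are in bna_bfi_stats_get_rsp() below, e.g.:
 *
 *	int i, count;
 *	u64 *stats_src, *stats_dst;
 *	bna_stats_copy(mac, mac);	swaps and copies the hw mac stats
 */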
/* FW response handlers */

static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}
static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}
static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}
static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/*
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
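/*
 * The firmware writes all statistics into one DMA block: fixed-position
 * mac/bpc/rad/rlb/fc sections first, then the per-function Rxf and Txf
 * blocks packed back to back for the enabled IDs only.  The handler below
 * therefore walks a single running source pointer and scatters each block
 * into the per-index SW arrays, zero-filling entries whose bit is clear
 * in the rx/tx enet masks.
 */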
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i, k;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}

static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}

void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(bna, intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(bna, intr_status))
		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
}
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
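/* ETHPORT */

/*
 * The admin up/down requests (regular enet) and the diag loopback
 * requests (loopback enet) below are both built in the ethport's
 * bfi_enet_cmd area and posted through the single ethport->msgq_cmd, so
 * only one port command can be outstanding at a time; the *_resp_wait
 * states of the ethport FSM serialize around that.
 */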
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	/* Same message ID as admin-up; the direction rides in ->enable */
	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
static void
bna_bfi_ethport_up(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_up(ethport);
	else
		bna_bfi_ethport_lpbk_up(ethport);
}

static void
bna_bfi_ethport_down(struct bna_ethport *ethport)
{
	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
		bna_bfi_ethport_admin_down(ethport);
	else
		bna_bfi_ethport_lpbk_down(ethport);
}
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
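/*
 * Ethport state machine implemented by the handlers below:
 *
 *   stopped -E_START-> down -E_UP-> up_resp_wait -E_FWRESP_UP_OK-> up
 *   up -E_DOWN-> down_resp_wait -E_FWRESP_*-> down
 *   E_STOP/E_FAIL funnel through last_resp_wait (when a firmware
 *   response is still pending) into stopped.
 *
 * The *_resp_wait states exist because a flip requested while a port
 * command is in flight must be deferred until its FWRESP_* arrives.
 */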
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/*
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/*
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}

static void
bna_ethport_uninit(struct bna_ethport *ethport)
{
	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	ethport->bna = NULL;
}
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
/* Should be called only when ethport is disabled */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}

void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}
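/* ENET */

/*
 * The child-stop macros below use the bfa_wc wait counter: one
 * bfa_wc_up() per child (ethport, tx_mod, rx_mod) before issuing its
 * stop, and a final bfa_wc_wait() to drop the reference taken by
 * bfa_wc_init().  Each child's stopped callback does bfa_wc_down();
 * when the count reaches zero, bna_enet_cb_chld_stopped() fires
 * ENET_E_CHLD_STOPPED into the enet FSM.
 */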
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)

#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
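/*
 * Enet state machine: stopped -E_START-> pause_init_wait (push the
 * initial pause config to FW) -E_FWRESP_PAUSE-> started.  Pause and MTU
 * reconfiguration move started -> cfg_wait and back; stop runs through
 * cfg_stop_wait/chld_stop_wait so an in-flight config response and the
 * children are drained before reaching stopped.
 */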
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/*
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
*enet
)
966 bna_bfi_pause_set(enet
);
970 bna_enet_sm_pause_init_wait(struct bna_enet
*enet
,
971 enum bna_enet_event event
)
975 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
976 bfa_fsm_set_state(enet
, bna_enet_sm_last_resp_wait
);
980 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
981 bfa_fsm_set_state(enet
, bna_enet_sm_stopped
);
984 case ENET_E_PAUSE_CFG
:
985 enet
->flags
|= BNA_ENET_F_PAUSE_CHANGED
;
992 case ENET_E_FWRESP_PAUSE
:
993 if (enet
->flags
& BNA_ENET_F_PAUSE_CHANGED
) {
994 enet
->flags
&= ~BNA_ENET_F_PAUSE_CHANGED
;
995 bna_bfi_pause_set(enet
);
997 bfa_fsm_set_state(enet
, bna_enet_sm_started
);
998 bna_enet_chld_start(enet
);
1003 bfa_sm_fault(event
);
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/*
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}

static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}
static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}

static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;

	enet->bna = NULL;
}
static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;

	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}

static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}

static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}

static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}

void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}

void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}

void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}
void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}

void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;
	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}

void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
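/* IOCETH */

/*
 * The ioceth object drives IOC (adapter firmware) bring-up: enable the
 * IOC, wait for it to become ready, query the enet attributes over the
 * message queue, then start the enet and the stats module.  Teardown
 * runs the same path in reverse, stopping the enet before disabling the
 * IOC.
 */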
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
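/*
 * Ioceth state machine: stopped -E_ENABLE-> ioc_ready_wait
 * -E_IOC_READY-> enet_attr_wait -E_ENET_ATTR_RESP-> ready.  Disable
 * walks ready -> enet_stop_wait -> ioc_disable_wait -> stopped; an IOC
 * failure from any active state lands in failed, from which E_IOC_RESET
 * re-enters ioc_ready_wait.
 */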
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}

static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/*
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}

static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}

static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}

static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}

static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
/* IOC callback functions */

static void
bna_cb_ioceth_enable(void *arg, enum bfa_status error)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	if (error)
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
	else
		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
}

static void
bna_cb_ioceth_disable(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
}

static void
bna_cb_ioceth_hbfail(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
}

static void
bna_cb_ioceth_reset(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
}
/* Positional init: order must match struct bfa_ioc_cbfn's members
 * (enable_cbfn, disable_cbfn, hbfail_cbfn, reset_cbfn).
 */
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	bna_cb_ioceth_enable,
	bna_cb_ioceth_disable,
	bna_cb_ioceth_hbfail,
	bna_cb_ioceth_reset
};

static void bna_attr_init(struct bna_ioceth *ioceth)
{
	ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
	ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
	ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
	ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
	ioceth->attr.fw_query_complete = false;
}
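/*
 * These defaults are placeholders only: fw_query_complete starts false,
 * so the first BFI_ENET_I2H_GET_ATTR_RSP (see bna_bfi_attr_get_rsp()
 * above) overwrites them with the firmware-reported limits.
 */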
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/*
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/*
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
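/*
 * The kva/dma carving order above (CEE, then flash, then msgq) must
 * match the combined length computed for BNA_RES_MEM_T_COM in
 * bna_res_req(), which sums bfa_nw_cee_meminfo() +
 * bfa_nw_flash_meminfo() + bfa_msgq_meminfo().
 */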
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}

void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}

void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}

static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i = 0;

	list_for_each(qe, &ucam_mod->free_q)
		i++;

	ucam_mod->bna = NULL;
}
static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->mchandle = (struct bna_mcam_handle *)
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
		list_add_tail(&mcam_mod->mchandle[i].qe,
			&mcam_mod->free_handle_q);
	}

	mcam_mod->bna = bna;
}

static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &mcam_mod->free_q) i++;

	i = 0;
	list_for_each(qe, &mcam_mod->free_handle_q) i++;

	mcam_mod->bna = NULL;
}
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
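/*
 * stats_get_busy is the single-outstanding-request guard: it is set
 * here, checked by bna_hw_stats_get() below, and cleared either by
 * bna_bfi_stats_get_rsp() on completion or by bna_stats_mod_fail() when
 * the IOC dies with a request in flight.
 */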
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(sizeof(struct bfi_enet_stats),
			PAGE_SIZE);
}
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		attr->num_ucmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}

void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
int
bna_num_txq_set(struct bna *bna, int num_txq)
{
	if (bna->ioceth.attr.fw_query_complete &&
		(num_txq <= bna->ioceth.attr.num_txq)) {
		bna->ioceth.attr.num_txq = num_txq;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}

int
bna_num_rxp_set(struct bna *bna, int num_rxp)
{
	if (bna->ioceth.attr.fw_query_complete &&
		(num_rxp <= bna->ioceth.attr.num_rxp)) {
		bna->ioceth.attr.num_rxp = num_rxp;
		return BNA_CB_SUCCESS;
	}

	return BNA_CB_FAIL;
}
struct bna_mac *
bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;

	if (list_empty(&ucam_mod->free_q))
		return NULL;

	bfa_q_deq(&ucam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}

struct bna_mac *
bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}

struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_handle_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_handle_q, &qe);

	return (struct bna_mcam_handle *)qe;
}

void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}