/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */
static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
static void
bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
			int status)
{
	int i;
	u8 prio_map;

	port->llport.link_status = BNA_LINK_UP;
	if (aen->cee_linkup)
		port->llport.link_status = BNA_CEE_UP;

	/* Compute the priority */
	prio_map = aen->prio_map;
	if (prio_map) {
		for (i = 0; i < 8; i++) {
			if ((prio_map >> i) & 0x1)
				break;
		}
		port->priority = i;
	} else
		port->priority = 0;

	/* Dispatch events */
	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
	port->link_cbfn(port->bna->bnad, port->llport.link_status);
}
static void
bna_port_cb_link_down(struct bna_port *port, int status)
{
	port->llport.link_status = BNA_LINK_DOWN;

	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
}
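/*
 * A link-layer port can come up only when it is administratively up and
 * at least one Rx path has started.  For a regular port the physical
 * port must also be enabled; for the other (loopback-style) port types
 * it must not be.
 */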
static int
llport_can_be_up(struct bna_llport *llport)
{
	int ready = 0;
	if (llport->type == BNA_PORT_T_REGULAR)
		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
			 (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
	else
		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
			 !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
	return ready;
}

#define llport_is_up llport_can_be_up
enum bna_llport_event {
	LLPORT_E_START			= 1,
	LLPORT_E_STOP			= 2,
	LLPORT_E_FAIL			= 3,
	LLPORT_E_UP			= 4,
	LLPORT_E_DOWN			= 5,
	LLPORT_E_FWRESP_UP_OK		= 6,
	LLPORT_E_FWRESP_UP_FAIL		= 7,
	LLPORT_E_FWRESP_DOWN		= 8
};
static void
bna_llport_cb_port_enabled(struct bna_llport *llport)
{
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;

	if (llport_can_be_up(llport))
		bfa_fsm_send_event(llport, LLPORT_E_UP);
}
static void
bna_llport_cb_port_disabled(struct bna_llport *llport)
{
	int llport_up = llport_is_up(llport);

	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;

	if (llport_up)
		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
}
static int
bna_is_aen(u8 msg_id)
{
	switch (msg_id) {
	case BFI_LL_I2H_LINK_DOWN_AEN:
	case BFI_LL_I2H_LINK_UP_AEN:
	case BFI_LL_I2H_PORT_ENABLE_AEN:
	case BFI_LL_I2H_PORT_DISABLE_AEN:
		return 1;

	default:
		return 0;
	}
}
static void
bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
{
	struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);

	switch (aen->mh.msg_id) {
	case BFI_LL_I2H_LINK_UP_AEN:
		bna_port_cb_link_up(&bna->port, aen, aen->reason);
		break;
	case BFI_LL_I2H_LINK_DOWN_AEN:
		bna_port_cb_link_down(&bna->port, aen->reason);
		break;
	case BFI_LL_I2H_PORT_ENABLE_AEN:
		bna_llport_cb_port_enabled(&bna->port.llport);
		break;
	case BFI_LL_I2H_PORT_DISABLE_AEN:
		bna_llport_cb_port_disabled(&bna->port.llport);
		break;
	default:
		break;
	}
}
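/*
 * Mailbox interrupt handler for the link-layer message class.  AEN
 * messages are dispatched straight to bna_mbox_aen_callback(); command
 * responses are matched against the request at the head of posted_q
 * (message id and i2h token) before the completion callback is run and
 * the next queued command, if any, is posted to the IOC mailbox.
 */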
static void
bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
{
	struct bna *bna = (struct bna *)(llarg);
	struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
	struct bfi_mhdr *cmd_h, *rsp_h;
	struct bna_mbox_qe *mb_qe = NULL;
	int to_post = 0;
	u8 aen = 0;
	char message[BNA_MESSAGE_SIZE];

	aen = bna_is_aen(mb_rsp->mh.msg_id);

	if (!aen) {
		mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
		cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
		rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);

		if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
		    (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
			/* Remove the request from posted_q, update state */
			list_del(&mb_qe->qe);
			bna->mbox_mod.msg_pending--;
			if (list_empty(&bna->mbox_mod.posted_q))
				bna->mbox_mod.state = BNA_MBOX_FREE;
			else
				to_post = 1;

			/* Dispatch the cbfn */
			if (mb_qe->cbfn)
				mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);

			/* Post the next entry, if needed */
			if (to_post) {
				mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
				bfa_nw_ioc_mbox_queue(&bna->device.ioc,
							&mb_qe->cmd);
			}
		} else {
			snprintf(message, BNA_MESSAGE_SIZE,
				"No matching rsp for [%d:%d:%d]\n",
				mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
				mb_rsp->mh.mtag.i2htok);
			pr_info("%s", message);
		}

	} else
		bna_mbox_aen_callback(bna, msg);
}
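/*
 * Error interrupt path: if the halt status bits are set, clear the
 * firmware init-halt request in the ll_halt register before handing the
 * error over to the common IOC error handling.
 */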
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	u32 init_halt;

	if (intr_status & __HALT_STATUS_BITS) {
		init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
		init_halt &= ~__FW_INIT_HALT_P;
		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
	}

	bfa_nw_ioc_error_isr(&bna->device.ioc);
}
void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(intr_status))
		bfa_nw_ioc_mbox_isr(&bna->device.ioc);
}
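/*
 * Commands are always appended to posted_q, but are handed to the IOC
 * mailbox immediately only when the mailbox is free; otherwise they wait
 * until bna_ll_isr() consumes the outstanding response and posts the
 * next entry.
 */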
void
bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
{
	struct bfi_mhdr *mh;

	mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);

	mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
	bna->mbox_mod.msg_ctr++;
	bna->mbox_mod.msg_pending++;
	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
		bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
		bna->mbox_mod.state = BNA_MBOX_POSTED;
	} else {
		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
	}
}
static void
bna_mbox_flush_q(struct bna *bna, struct list_head *q)
{
	struct bna_mbox_qe *mb_qe = NULL;
	struct bfi_mhdr *cmd_h;
	struct list_head *mb_q;
	void (*cbfn)(void *arg, int status);
	void *cbarg;

	mb_q = &bna->mbox_mod.posted_q;

	while (!list_empty(mb_q)) {
		bfa_q_deq(mb_q, &mb_qe);
		cbfn = mb_qe->cbfn;
		cbarg = mb_qe->cbarg;
		bfa_q_qe_init(mb_qe);
		bna->mbox_mod.msg_pending--;

		cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
		if (cbfn)
			cbfn(cbarg, BNA_CB_NOT_EXEC);
	}

	bna->mbox_mod.state = BNA_MBOX_FREE;
}
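/*
 * Mailbox module lifecycle.  Stopping the module flushes any commands
 * still waiting on posted_q, completing each of them with
 * BNA_CB_NOT_EXEC so that callers are not left waiting.
 */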
void
bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
{
}

void
bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
{
	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
}

void
bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
{
	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
	mbox_mod->state = BNA_MBOX_FREE;
	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
	INIT_LIST_HEAD(&mbox_mod->posted_q);
	mbox_mod->bna = bna;
}

void
bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
{
	mbox_mod->bna = NULL;
}
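/*
 * call_llport_stop_cbfn() fires the pending stop completion at most
 * once: the saved callback pointer is cleared right after it is
 * invoked.
 */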
#define call_llport_stop_cbfn(llport, status)\
do {\
	if ((llport)->stop_cbfn)\
		(llport)->stop_cbfn(&(llport)->bna->port, status);\
	(llport)->stop_cbfn = NULL;\
} while (0)
static void bna_fw_llport_up(struct bna_llport *llport);
static void bna_fw_cb_llport_up(void *arg, int status);
static void bna_fw_llport_down(struct bna_llport *llport);
static void bna_fw_cb_llport_down(void *arg, int status);
static void bna_llport_start(struct bna_llport *llport);
static void bna_llport_stop(struct bna_llport *llport);
static void bna_llport_fail(struct bna_llport *llport);
enum bna_llport_state {
	BNA_LLPORT_STOPPED		= 1,
	BNA_LLPORT_DOWN			= 2,
	BNA_LLPORT_UP_RESP_WAIT		= 3,
	BNA_LLPORT_DOWN_RESP_WAIT	= 4,
	BNA_LLPORT_UP			= 5,
	BNA_LLPORT_LAST_RESP_WAIT	= 6
};
bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
			enum bna_llport_event);
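/*
 * Map each state handler to its bna_llport_state value so that
 * bna_llport_state_get() can report the current FSM state.
 */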
static struct bfa_sm_table llport_sm_table[] = {
	{BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
	{BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
	{BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
	{BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
	{BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
	{BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
};
static void
bna_llport_sm_stopped_entry(struct bna_llport *llport)
{
	llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
	call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
}
static void
bna_llport_sm_stopped(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_START:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	case LLPORT_E_STOP:
		call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
		break;

	case LLPORT_E_FAIL:
		break;

	case LLPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	case LLPORT_E_FWRESP_UP_OK:
	case LLPORT_E_FWRESP_DOWN:
		/**
		 * These events are received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
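/*
 * Note on the remaining llport state handlers: down -> up_resp_wait once
 * the admin-up command has been posted to firmware, then up on
 * LLPORT_E_FWRESP_UP_OK.  The *_resp_wait states buffer a reversal of
 * intent (up/down) until the outstanding firmware response arrives, so
 * that only one port admin command is in the mailbox at a time.
 */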
390 bna_llport_sm_down_entry(struct bna_llport
*llport
)
392 bnad_cb_port_link_status((llport
)->bna
->bnad
, BNA_LINK_DOWN
);
396 bna_llport_sm_down(struct bna_llport
*llport
,
397 enum bna_llport_event event
)
401 bfa_fsm_set_state(llport
, bna_llport_sm_stopped
);
405 bfa_fsm_set_state(llport
, bna_llport_sm_stopped
);
409 bfa_fsm_set_state(llport
, bna_llport_sm_up_resp_wait
);
410 bna_fw_llport_up(llport
);
414 bfa_sm_fault(llport
->bna
, event
);
419 bna_llport_sm_up_resp_wait_entry(struct bna_llport
*llport
)
421 BUG_ON(!llport_can_be_up(llport
));
423 * NOTE: Do not call bna_fw_llport_up() here. That will over step
424 * mbox due to down_resp_wait -> up_resp_wait transition on event
430 bna_llport_sm_up_resp_wait(struct bna_llport
*llport
,
431 enum bna_llport_event event
)
435 bfa_fsm_set_state(llport
, bna_llport_sm_last_resp_wait
);
439 bfa_fsm_set_state(llport
, bna_llport_sm_stopped
);
443 bfa_fsm_set_state(llport
, bna_llport_sm_down_resp_wait
);
446 case LLPORT_E_FWRESP_UP_OK
:
447 bfa_fsm_set_state(llport
, bna_llport_sm_up
);
450 case LLPORT_E_FWRESP_UP_FAIL
:
451 bfa_fsm_set_state(llport
, bna_llport_sm_down
);
454 case LLPORT_E_FWRESP_DOWN
:
455 /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
456 bna_fw_llport_up(llport
);
460 bfa_sm_fault(llport
->bna
, event
);
465 bna_llport_sm_down_resp_wait_entry(struct bna_llport
*llport
)
468 * NOTE: Do not call bna_fw_llport_down() here. That will over step
469 * mbox due to up_resp_wait -> down_resp_wait transition on event
475 bna_llport_sm_down_resp_wait(struct bna_llport
*llport
,
476 enum bna_llport_event event
)
480 bfa_fsm_set_state(llport
, bna_llport_sm_last_resp_wait
);
484 bfa_fsm_set_state(llport
, bna_llport_sm_stopped
);
488 bfa_fsm_set_state(llport
, bna_llport_sm_up_resp_wait
);
491 case LLPORT_E_FWRESP_UP_OK
:
492 /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
493 bna_fw_llport_down(llport
);
496 case LLPORT_E_FWRESP_UP_FAIL
:
497 case LLPORT_E_FWRESP_DOWN
:
498 bfa_fsm_set_state(llport
, bna_llport_sm_down
);
502 bfa_sm_fault(llport
->bna
, event
);
507 bna_llport_sm_up_entry(struct bna_llport
*llport
)
512 bna_llport_sm_up(struct bna_llport
*llport
,
513 enum bna_llport_event event
)
517 bfa_fsm_set_state(llport
, bna_llport_sm_last_resp_wait
);
518 bna_fw_llport_down(llport
);
522 bfa_fsm_set_state(llport
, bna_llport_sm_stopped
);
526 bfa_fsm_set_state(llport
, bna_llport_sm_down_resp_wait
);
527 bna_fw_llport_down(llport
);
531 bfa_sm_fault(llport
->bna
, event
);
536 bna_llport_sm_last_resp_wait_entry(struct bna_llport
*llport
)
541 bna_llport_sm_last_resp_wait(struct bna_llport
*llport
,
542 enum bna_llport_event event
)
546 bfa_fsm_set_state(llport
, bna_llport_sm_stopped
);
551 * This event is received due to Rx objects stopping in
557 case LLPORT_E_FWRESP_UP_OK
:
558 /* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
559 bna_fw_llport_down(llport
);
562 case LLPORT_E_FWRESP_UP_FAIL
:
563 case LLPORT_E_FWRESP_DOWN
:
564 bfa_fsm_set_state(llport
, bna_llport_sm_stopped
);
568 bfa_sm_fault(llport
->bna
, event
);
573 bna_fw_llport_admin_up(struct bna_llport
*llport
)
575 struct bfi_ll_port_admin_req ll_req
;
577 memset(&ll_req
, 0, sizeof(ll_req
));
578 ll_req
.mh
.msg_class
= BFI_MC_LL
;
579 ll_req
.mh
.msg_id
= BFI_LL_H2I_PORT_ADMIN_REQ
;
580 ll_req
.mh
.mtag
.h2i
.lpu_id
= 0;
582 ll_req
.up
= BNA_STATUS_T_ENABLED
;
584 bna_mbox_qe_fill(&llport
->mbox_qe
, &ll_req
, sizeof(ll_req
),
585 bna_fw_cb_llport_up
, llport
);
587 bna_mbox_send(llport
->bna
, &llport
->mbox_qe
);
591 bna_fw_llport_up(struct bna_llport
*llport
)
593 if (llport
->type
== BNA_PORT_T_REGULAR
)
594 bna_fw_llport_admin_up(llport
);
598 bna_fw_cb_llport_up(void *arg
, int status
)
600 struct bna_llport
*llport
= (struct bna_llport
*)arg
;
602 bfa_q_qe_init(&llport
->mbox_qe
.qe
);
603 if (status
== BFI_LL_CMD_FAIL
) {
604 if (llport
->type
== BNA_PORT_T_REGULAR
)
605 llport
->flags
&= ~BNA_LLPORT_F_PORT_ENABLED
;
607 llport
->flags
&= ~BNA_LLPORT_F_ADMIN_UP
;
608 bfa_fsm_send_event(llport
, LLPORT_E_FWRESP_UP_FAIL
);
610 bfa_fsm_send_event(llport
, LLPORT_E_FWRESP_UP_OK
);
614 bna_fw_llport_admin_down(struct bna_llport
*llport
)
616 struct bfi_ll_port_admin_req ll_req
;
618 memset(&ll_req
, 0, sizeof(ll_req
));
619 ll_req
.mh
.msg_class
= BFI_MC_LL
;
620 ll_req
.mh
.msg_id
= BFI_LL_H2I_PORT_ADMIN_REQ
;
621 ll_req
.mh
.mtag
.h2i
.lpu_id
= 0;
623 ll_req
.up
= BNA_STATUS_T_DISABLED
;
625 bna_mbox_qe_fill(&llport
->mbox_qe
, &ll_req
, sizeof(ll_req
),
626 bna_fw_cb_llport_down
, llport
);
628 bna_mbox_send(llport
->bna
, &llport
->mbox_qe
);
632 bna_fw_llport_down(struct bna_llport
*llport
)
634 if (llport
->type
== BNA_PORT_T_REGULAR
)
635 bna_fw_llport_admin_down(llport
);
639 bna_fw_cb_llport_down(void *arg
, int status
)
641 struct bna_llport
*llport
= (struct bna_llport
*)arg
;
643 bfa_q_qe_init(&llport
->mbox_qe
.qe
);
644 bfa_fsm_send_event(llport
, LLPORT_E_FWRESP_DOWN
);
648 bna_port_cb_llport_stopped(struct bna_port
*port
,
649 enum bna_cb_status status
)
651 bfa_wc_down(&port
->chld_stop_wc
);
655 bna_llport_init(struct bna_llport
*llport
, struct bna
*bna
)
657 llport
->flags
|= BNA_LLPORT_F_ADMIN_UP
;
658 llport
->flags
|= BNA_LLPORT_F_PORT_ENABLED
;
659 llport
->type
= BNA_PORT_T_REGULAR
;
662 llport
->link_status
= BNA_LINK_DOWN
;
664 llport
->rx_started_count
= 0;
666 llport
->stop_cbfn
= NULL
;
668 bfa_q_qe_init(&llport
->mbox_qe
.qe
);
670 bfa_fsm_set_state(llport
, bna_llport_sm_stopped
);
674 bna_llport_uninit(struct bna_llport
*llport
)
676 llport
->flags
&= ~BNA_LLPORT_F_ADMIN_UP
;
677 llport
->flags
&= ~BNA_LLPORT_F_PORT_ENABLED
;
683 bna_llport_start(struct bna_llport
*llport
)
685 bfa_fsm_send_event(llport
, LLPORT_E_START
);
689 bna_llport_stop(struct bna_llport
*llport
)
691 llport
->stop_cbfn
= bna_port_cb_llport_stopped
;
693 bfa_fsm_send_event(llport
, LLPORT_E_STOP
);
697 bna_llport_fail(struct bna_llport
*llport
)
699 /* Reset the physical port status to enabled */
700 llport
->flags
|= BNA_LLPORT_F_PORT_ENABLED
;
701 bfa_fsm_send_event(llport
, LLPORT_E_FAIL
);
705 bna_llport_state_get(struct bna_llport
*llport
)
707 return bfa_sm_to_state(llport_sm_table
, llport
->fsm
);
711 bna_llport_rx_started(struct bna_llport
*llport
)
713 llport
->rx_started_count
++;
715 if (llport
->rx_started_count
== 1) {
717 llport
->flags
|= BNA_LLPORT_F_RX_STARTED
;
719 if (llport_can_be_up(llport
))
720 bfa_fsm_send_event(llport
, LLPORT_E_UP
);
725 bna_llport_rx_stopped(struct bna_llport
*llport
)
727 int llport_up
= llport_is_up(llport
);
729 llport
->rx_started_count
--;
731 if (llport
->rx_started_count
== 0) {
733 llport
->flags
&= ~BNA_LLPORT_F_RX_STARTED
;
736 bfa_fsm_send_event(llport
, LLPORT_E_DOWN
);
#define bna_port_chld_start(port)\
do {\
	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bna_llport_start(&(port)->llport);\
	bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_chld_stop(port)\
do {\
	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bna_llport_stop(&(port)->llport);\
	bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_chld_fail(port)\
do {\
	bna_llport_fail(&(port)->llport);\
	bna_tx_mod_fail(&(port)->bna->tx_mod);\
	bna_rx_mod_fail(&(port)->bna->rx_mod);\
} while (0)
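/*
 * bna_port_chld_stop() above and bna_port_rx_stop() below take one
 * chld_stop_wc reference per child being stopped; each child's stopped
 * callback drops a reference, and when the count reaches zero
 * bna_port_cb_chld_stopped() posts PORT_E_CHLD_STOPPED to the port FSM.
 */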
#define bna_port_rx_start(port)\
do {\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_rx_stop(port)\
do {\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define call_port_stop_cbfn(port, status)\
do {\
	if ((port)->stop_cbfn)\
		(port)->stop_cbfn((port)->stop_cbarg, status);\
	(port)->stop_cbfn = NULL;\
	(port)->stop_cbarg = NULL;\
} while (0)

#define call_port_pause_cbfn(port, status)\
do {\
	if ((port)->pause_cbfn)\
		(port)->pause_cbfn((port)->bna->bnad, status);\
	(port)->pause_cbfn = NULL;\
} while (0)

#define call_port_mtu_cbfn(port, status)\
do {\
	if ((port)->mtu_cbfn)\
		(port)->mtu_cbfn((port)->bna->bnad, status);\
	(port)->mtu_cbfn = NULL;\
} while (0)
812 static void bna_fw_pause_set(struct bna_port
*port
);
813 static void bna_fw_cb_pause_set(void *arg
, int status
);
814 static void bna_fw_mtu_set(struct bna_port
*port
);
815 static void bna_fw_cb_mtu_set(void *arg
, int status
);
817 enum bna_port_event
{
821 PORT_E_PAUSE_CFG
= 4,
823 PORT_E_CHLD_STOPPED
= 6,
824 PORT_E_FWRESP_PAUSE
= 7,
825 PORT_E_FWRESP_MTU
= 8
828 enum bna_port_state
{
829 BNA_PORT_STOPPED
= 1,
830 BNA_PORT_MTU_INIT_WAIT
= 2,
831 BNA_PORT_PAUSE_INIT_WAIT
= 3,
832 BNA_PORT_LAST_RESP_WAIT
= 4,
833 BNA_PORT_STARTED
= 5,
834 BNA_PORT_PAUSE_CFG_WAIT
= 6,
835 BNA_PORT_RX_STOP_WAIT
= 7,
836 BNA_PORT_MTU_CFG_WAIT
= 8,
837 BNA_PORT_CHLD_STOP_WAIT
= 9
840 bfa_fsm_state_decl(bna_port
, stopped
, struct bna_port
,
841 enum bna_port_event
);
842 bfa_fsm_state_decl(bna_port
, mtu_init_wait
, struct bna_port
,
843 enum bna_port_event
);
844 bfa_fsm_state_decl(bna_port
, pause_init_wait
, struct bna_port
,
845 enum bna_port_event
);
846 bfa_fsm_state_decl(bna_port
, last_resp_wait
, struct bna_port
,
847 enum bna_port_event
);
848 bfa_fsm_state_decl(bna_port
, started
, struct bna_port
,
849 enum bna_port_event
);
850 bfa_fsm_state_decl(bna_port
, pause_cfg_wait
, struct bna_port
,
851 enum bna_port_event
);
852 bfa_fsm_state_decl(bna_port
, rx_stop_wait
, struct bna_port
,
853 enum bna_port_event
);
854 bfa_fsm_state_decl(bna_port
, mtu_cfg_wait
, struct bna_port
,
855 enum bna_port_event
);
856 bfa_fsm_state_decl(bna_port
, chld_stop_wait
, struct bna_port
,
857 enum bna_port_event
);
859 static struct bfa_sm_table port_sm_table
[] = {
860 {BFA_SM(bna_port_sm_stopped
), BNA_PORT_STOPPED
},
861 {BFA_SM(bna_port_sm_mtu_init_wait
), BNA_PORT_MTU_INIT_WAIT
},
862 {BFA_SM(bna_port_sm_pause_init_wait
), BNA_PORT_PAUSE_INIT_WAIT
},
863 {BFA_SM(bna_port_sm_last_resp_wait
), BNA_PORT_LAST_RESP_WAIT
},
864 {BFA_SM(bna_port_sm_started
), BNA_PORT_STARTED
},
865 {BFA_SM(bna_port_sm_pause_cfg_wait
), BNA_PORT_PAUSE_CFG_WAIT
},
866 {BFA_SM(bna_port_sm_rx_stop_wait
), BNA_PORT_RX_STOP_WAIT
},
867 {BFA_SM(bna_port_sm_mtu_cfg_wait
), BNA_PORT_MTU_CFG_WAIT
},
868 {BFA_SM(bna_port_sm_chld_stop_wait
), BNA_PORT_CHLD_STOP_WAIT
}
872 bna_port_sm_stopped_entry(struct bna_port
*port
)
874 call_port_pause_cbfn(port
, BNA_CB_SUCCESS
);
875 call_port_mtu_cbfn(port
, BNA_CB_SUCCESS
);
876 call_port_stop_cbfn(port
, BNA_CB_SUCCESS
);
880 bna_port_sm_stopped(struct bna_port
*port
, enum bna_port_event event
)
884 bfa_fsm_set_state(port
, bna_port_sm_mtu_init_wait
);
888 call_port_stop_cbfn(port
, BNA_CB_SUCCESS
);
895 case PORT_E_PAUSE_CFG
:
896 call_port_pause_cbfn(port
, BNA_CB_SUCCESS
);
900 call_port_mtu_cbfn(port
, BNA_CB_SUCCESS
);
903 case PORT_E_CHLD_STOPPED
:
905 * This event is received due to LLPort, Tx and Rx objects
911 case PORT_E_FWRESP_PAUSE
:
912 case PORT_E_FWRESP_MTU
:
914 * These events are received due to flushing of mbox when
921 bfa_sm_fault(port
->bna
, event
);
926 bna_port_sm_mtu_init_wait_entry(struct bna_port
*port
)
928 bna_fw_mtu_set(port
);
932 bna_port_sm_mtu_init_wait(struct bna_port
*port
, enum bna_port_event event
)
936 bfa_fsm_set_state(port
, bna_port_sm_last_resp_wait
);
940 bfa_fsm_set_state(port
, bna_port_sm_stopped
);
943 case PORT_E_PAUSE_CFG
:
948 port
->flags
|= BNA_PORT_F_MTU_CHANGED
;
951 case PORT_E_FWRESP_MTU
:
952 if (port
->flags
& BNA_PORT_F_MTU_CHANGED
) {
953 port
->flags
&= ~BNA_PORT_F_MTU_CHANGED
;
954 bna_fw_mtu_set(port
);
956 bfa_fsm_set_state(port
, bna_port_sm_pause_init_wait
);
961 bfa_sm_fault(port
->bna
, event
);
966 bna_port_sm_pause_init_wait_entry(struct bna_port
*port
)
968 bna_fw_pause_set(port
);
972 bna_port_sm_pause_init_wait(struct bna_port
*port
,
973 enum bna_port_event event
)
977 bfa_fsm_set_state(port
, bna_port_sm_last_resp_wait
);
981 bfa_fsm_set_state(port
, bna_port_sm_stopped
);
984 case PORT_E_PAUSE_CFG
:
985 port
->flags
|= BNA_PORT_F_PAUSE_CHANGED
;
989 port
->flags
|= BNA_PORT_F_MTU_CHANGED
;
992 case PORT_E_FWRESP_PAUSE
:
993 if (port
->flags
& BNA_PORT_F_PAUSE_CHANGED
) {
994 port
->flags
&= ~BNA_PORT_F_PAUSE_CHANGED
;
995 bna_fw_pause_set(port
);
996 } else if (port
->flags
& BNA_PORT_F_MTU_CHANGED
) {
997 port
->flags
&= ~BNA_PORT_F_MTU_CHANGED
;
998 bfa_fsm_set_state(port
, bna_port_sm_mtu_init_wait
);
1000 bfa_fsm_set_state(port
, bna_port_sm_started
);
1001 bna_port_chld_start(port
);
1006 bfa_sm_fault(port
->bna
, event
);
1011 bna_port_sm_last_resp_wait_entry(struct bna_port
*port
)
1016 bna_port_sm_last_resp_wait(struct bna_port
*port
,
1017 enum bna_port_event event
)
1021 case PORT_E_FWRESP_PAUSE
:
1022 case PORT_E_FWRESP_MTU
:
1023 bfa_fsm_set_state(port
, bna_port_sm_stopped
);
1027 bfa_sm_fault(port
->bna
, event
);
1032 bna_port_sm_started_entry(struct bna_port
*port
)
1035 * NOTE: Do not call bna_port_chld_start() here, since it will be
1036 * inadvertently called during pause_cfg_wait->started transition
1039 call_port_pause_cbfn(port
, BNA_CB_SUCCESS
);
1040 call_port_mtu_cbfn(port
, BNA_CB_SUCCESS
);
1044 bna_port_sm_started(struct bna_port
*port
,
1045 enum bna_port_event event
)
1049 bfa_fsm_set_state(port
, bna_port_sm_chld_stop_wait
);
1053 bfa_fsm_set_state(port
, bna_port_sm_stopped
);
1054 bna_port_chld_fail(port
);
1057 case PORT_E_PAUSE_CFG
:
1058 bfa_fsm_set_state(port
, bna_port_sm_pause_cfg_wait
);
1061 case PORT_E_MTU_CFG
:
1062 bfa_fsm_set_state(port
, bna_port_sm_rx_stop_wait
);
1066 bfa_sm_fault(port
->bna
, event
);
1071 bna_port_sm_pause_cfg_wait_entry(struct bna_port
*port
)
1073 bna_fw_pause_set(port
);
1077 bna_port_sm_pause_cfg_wait(struct bna_port
*port
,
1078 enum bna_port_event event
)
1082 bfa_fsm_set_state(port
, bna_port_sm_stopped
);
1083 bna_port_chld_fail(port
);
1086 case PORT_E_FWRESP_PAUSE
:
1087 bfa_fsm_set_state(port
, bna_port_sm_started
);
1091 bfa_sm_fault(port
->bna
, event
);
1096 bna_port_sm_rx_stop_wait_entry(struct bna_port
*port
)
1098 bna_port_rx_stop(port
);
1102 bna_port_sm_rx_stop_wait(struct bna_port
*port
,
1103 enum bna_port_event event
)
1107 bfa_fsm_set_state(port
, bna_port_sm_stopped
);
1108 bna_port_chld_fail(port
);
1111 case PORT_E_CHLD_STOPPED
:
1112 bfa_fsm_set_state(port
, bna_port_sm_mtu_cfg_wait
);
1116 bfa_sm_fault(port
->bna
, event
);
1121 bna_port_sm_mtu_cfg_wait_entry(struct bna_port
*port
)
1123 bna_fw_mtu_set(port
);
1127 bna_port_sm_mtu_cfg_wait(struct bna_port
*port
, enum bna_port_event event
)
1131 bfa_fsm_set_state(port
, bna_port_sm_stopped
);
1132 bna_port_chld_fail(port
);
1135 case PORT_E_FWRESP_MTU
:
1136 bfa_fsm_set_state(port
, bna_port_sm_started
);
1137 bna_port_rx_start(port
);
1141 bfa_sm_fault(port
->bna
, event
);
1146 bna_port_sm_chld_stop_wait_entry(struct bna_port
*port
)
1148 bna_port_chld_stop(port
);
1152 bna_port_sm_chld_stop_wait(struct bna_port
*port
,
1153 enum bna_port_event event
)
1157 bfa_fsm_set_state(port
, bna_port_sm_stopped
);
1158 bna_port_chld_fail(port
);
1161 case PORT_E_CHLD_STOPPED
:
1162 bfa_fsm_set_state(port
, bna_port_sm_stopped
);
1166 bfa_sm_fault(port
->bna
, event
);
1171 bna_fw_pause_set(struct bna_port
*port
)
1173 struct bfi_ll_set_pause_req ll_req
;
1175 memset(&ll_req
, 0, sizeof(ll_req
));
1176 ll_req
.mh
.msg_class
= BFI_MC_LL
;
1177 ll_req
.mh
.msg_id
= BFI_LL_H2I_SET_PAUSE_REQ
;
1178 ll_req
.mh
.mtag
.h2i
.lpu_id
= 0;
1180 ll_req
.tx_pause
= port
->pause_config
.tx_pause
;
1181 ll_req
.rx_pause
= port
->pause_config
.rx_pause
;
1183 bna_mbox_qe_fill(&port
->mbox_qe
, &ll_req
, sizeof(ll_req
),
1184 bna_fw_cb_pause_set
, port
);
1186 bna_mbox_send(port
->bna
, &port
->mbox_qe
);
1190 bna_fw_cb_pause_set(void *arg
, int status
)
1192 struct bna_port
*port
= (struct bna_port
*)arg
;
1194 bfa_q_qe_init(&port
->mbox_qe
.qe
);
1195 bfa_fsm_send_event(port
, PORT_E_FWRESP_PAUSE
);
1199 bna_fw_mtu_set(struct bna_port
*port
)
1201 struct bfi_ll_mtu_info_req ll_req
;
1203 bfi_h2i_set(ll_req
.mh
, BFI_MC_LL
, BFI_LL_H2I_MTU_INFO_REQ
, 0);
1204 ll_req
.mtu
= htons((u16
)port
->mtu
);
1206 bna_mbox_qe_fill(&port
->mbox_qe
, &ll_req
, sizeof(ll_req
),
1207 bna_fw_cb_mtu_set
, port
);
1208 bna_mbox_send(port
->bna
, &port
->mbox_qe
);
1212 bna_fw_cb_mtu_set(void *arg
, int status
)
1214 struct bna_port
*port
= (struct bna_port
*)arg
;
1216 bfa_q_qe_init(&port
->mbox_qe
.qe
);
1217 bfa_fsm_send_event(port
, PORT_E_FWRESP_MTU
);
1221 bna_port_cb_chld_stopped(void *arg
)
1223 struct bna_port
*port
= (struct bna_port
*)arg
;
1225 bfa_fsm_send_event(port
, PORT_E_CHLD_STOPPED
);
1229 bna_port_init(struct bna_port
*port
, struct bna
*bna
)
1234 port
->type
= BNA_PORT_T_REGULAR
;
1236 port
->link_cbfn
= bnad_cb_port_link_status
;
1238 port
->chld_stop_wc
.wc_resume
= bna_port_cb_chld_stopped
;
1239 port
->chld_stop_wc
.wc_cbarg
= port
;
1240 port
->chld_stop_wc
.wc_count
= 0;
1242 port
->stop_cbfn
= NULL
;
1243 port
->stop_cbarg
= NULL
;
1245 port
->pause_cbfn
= NULL
;
1247 port
->mtu_cbfn
= NULL
;
1249 bfa_q_qe_init(&port
->mbox_qe
.qe
);
1251 bfa_fsm_set_state(port
, bna_port_sm_stopped
);
1253 bna_llport_init(&port
->llport
, bna
);
1257 bna_port_uninit(struct bna_port
*port
)
1259 bna_llport_uninit(&port
->llport
);
1267 bna_port_state_get(struct bna_port
*port
)
1269 return bfa_sm_to_state(port_sm_table
, port
->fsm
);
1273 bna_port_start(struct bna_port
*port
)
1275 port
->flags
|= BNA_PORT_F_DEVICE_READY
;
1276 if (port
->flags
& BNA_PORT_F_ENABLED
)
1277 bfa_fsm_send_event(port
, PORT_E_START
);
1281 bna_port_stop(struct bna_port
*port
)
1283 port
->stop_cbfn
= bna_device_cb_port_stopped
;
1284 port
->stop_cbarg
= &port
->bna
->device
;
1286 port
->flags
&= ~BNA_PORT_F_DEVICE_READY
;
1287 bfa_fsm_send_event(port
, PORT_E_STOP
);
1291 bna_port_fail(struct bna_port
*port
)
1293 port
->flags
&= ~BNA_PORT_F_DEVICE_READY
;
1294 bfa_fsm_send_event(port
, PORT_E_FAIL
);
1298 bna_port_cb_tx_stopped(struct bna_port
*port
, enum bna_cb_status status
)
1300 bfa_wc_down(&port
->chld_stop_wc
);
1304 bna_port_cb_rx_stopped(struct bna_port
*port
, enum bna_cb_status status
)
1306 bfa_wc_down(&port
->chld_stop_wc
);
1310 bna_port_mtu_get(struct bna_port
*port
)
1316 bna_port_enable(struct bna_port
*port
)
1318 if (port
->fsm
!= (bfa_sm_t
)bna_port_sm_stopped
)
1321 port
->flags
|= BNA_PORT_F_ENABLED
;
1323 if (port
->flags
& BNA_PORT_F_DEVICE_READY
)
1324 bfa_fsm_send_event(port
, PORT_E_START
);
1328 bna_port_disable(struct bna_port
*port
, enum bna_cleanup_type type
,
1329 void (*cbfn
)(void *, enum bna_cb_status
))
1331 if (type
== BNA_SOFT_CLEANUP
) {
1332 (*cbfn
)(port
->bna
->bnad
, BNA_CB_SUCCESS
);
1336 port
->stop_cbfn
= cbfn
;
1337 port
->stop_cbarg
= port
->bna
->bnad
;
1339 port
->flags
&= ~BNA_PORT_F_ENABLED
;
1341 bfa_fsm_send_event(port
, PORT_E_STOP
);
1345 bna_port_pause_config(struct bna_port
*port
,
1346 struct bna_pause_config
*pause_config
,
1347 void (*cbfn
)(struct bnad
*, enum bna_cb_status
))
1349 port
->pause_config
= *pause_config
;
1351 port
->pause_cbfn
= cbfn
;
1353 bfa_fsm_send_event(port
, PORT_E_PAUSE_CFG
);
1357 bna_port_mtu_set(struct bna_port
*port
, int mtu
,
1358 void (*cbfn
)(struct bnad
*, enum bna_cb_status
))
1362 port
->mtu_cbfn
= cbfn
;
1364 bfa_fsm_send_event(port
, PORT_E_MTU_CFG
);
1368 bna_port_mac_get(struct bna_port
*port
, mac_t
*mac
)
1370 *mac
= bfa_nw_ioc_get_mac(&port
->bna
->device
.ioc
);
#define enable_mbox_intr(_device)\
do {\
	u32 intr_status;\
	bna_intr_status_get((_device)->bna, intr_status);\
	bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
	bna_mbox_intr_enable((_device)->bna);\
} while (0)

#define disable_mbox_intr(_device)\
do {\
	bna_mbox_intr_disable((_device)->bna);\
	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
} while (0)
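/*
 * Note: the two helpers above keep the hardware mailbox interrupt state
 * and the corresponding bnad-layer notification in sync when the device
 * FSM enables or disables mailbox interrupts.
 */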
1390 static const struct bna_chip_regs_offset reg_offset
[] =
1391 {{HOST_PAGE_NUM_FN0
, HOSTFN0_INT_STATUS
,
1392 HOSTFN0_INT_MASK
, HOST_MSIX_ERR_INDEX_FN0
},
1393 {HOST_PAGE_NUM_FN1
, HOSTFN1_INT_STATUS
,
1394 HOSTFN1_INT_MASK
, HOST_MSIX_ERR_INDEX_FN1
},
1395 {HOST_PAGE_NUM_FN2
, HOSTFN2_INT_STATUS
,
1396 HOSTFN2_INT_MASK
, HOST_MSIX_ERR_INDEX_FN2
},
1397 {HOST_PAGE_NUM_FN3
, HOSTFN3_INT_STATUS
,
1398 HOSTFN3_INT_MASK
, HOST_MSIX_ERR_INDEX_FN3
},
1401 enum bna_device_event
{
1402 DEVICE_E_ENABLE
= 1,
1403 DEVICE_E_DISABLE
= 2,
1404 DEVICE_E_IOC_READY
= 3,
1405 DEVICE_E_IOC_FAILED
= 4,
1406 DEVICE_E_IOC_DISABLED
= 5,
1407 DEVICE_E_IOC_RESET
= 6,
1408 DEVICE_E_PORT_STOPPED
= 7,
1411 enum bna_device_state
{
1412 BNA_DEVICE_STOPPED
= 1,
1413 BNA_DEVICE_IOC_READY_WAIT
= 2,
1414 BNA_DEVICE_READY
= 3,
1415 BNA_DEVICE_PORT_STOP_WAIT
= 4,
1416 BNA_DEVICE_IOC_DISABLE_WAIT
= 5,
1417 BNA_DEVICE_FAILED
= 6
1420 bfa_fsm_state_decl(bna_device
, stopped
, struct bna_device
,
1421 enum bna_device_event
);
1422 bfa_fsm_state_decl(bna_device
, ioc_ready_wait
, struct bna_device
,
1423 enum bna_device_event
);
1424 bfa_fsm_state_decl(bna_device
, ready
, struct bna_device
,
1425 enum bna_device_event
);
1426 bfa_fsm_state_decl(bna_device
, port_stop_wait
, struct bna_device
,
1427 enum bna_device_event
);
1428 bfa_fsm_state_decl(bna_device
, ioc_disable_wait
, struct bna_device
,
1429 enum bna_device_event
);
1430 bfa_fsm_state_decl(bna_device
, failed
, struct bna_device
,
1431 enum bna_device_event
);
1433 static struct bfa_sm_table device_sm_table
[] = {
1434 {BFA_SM(bna_device_sm_stopped
), BNA_DEVICE_STOPPED
},
1435 {BFA_SM(bna_device_sm_ioc_ready_wait
), BNA_DEVICE_IOC_READY_WAIT
},
1436 {BFA_SM(bna_device_sm_ready
), BNA_DEVICE_READY
},
1437 {BFA_SM(bna_device_sm_port_stop_wait
), BNA_DEVICE_PORT_STOP_WAIT
},
1438 {BFA_SM(bna_device_sm_ioc_disable_wait
), BNA_DEVICE_IOC_DISABLE_WAIT
},
1439 {BFA_SM(bna_device_sm_failed
), BNA_DEVICE_FAILED
},
1443 bna_device_sm_stopped_entry(struct bna_device
*device
)
1445 if (device
->stop_cbfn
)
1446 device
->stop_cbfn(device
->stop_cbarg
, BNA_CB_SUCCESS
);
1448 device
->stop_cbfn
= NULL
;
1449 device
->stop_cbarg
= NULL
;
1453 bna_device_sm_stopped(struct bna_device
*device
,
1454 enum bna_device_event event
)
1457 case DEVICE_E_ENABLE
:
1458 if (device
->intr_type
== BNA_INTR_T_MSIX
)
1459 bna_mbox_msix_idx_set(device
);
1460 bfa_nw_ioc_enable(&device
->ioc
);
1461 bfa_fsm_set_state(device
, bna_device_sm_ioc_ready_wait
);
1464 case DEVICE_E_DISABLE
:
1465 bfa_fsm_set_state(device
, bna_device_sm_stopped
);
1468 case DEVICE_E_IOC_RESET
:
1469 enable_mbox_intr(device
);
1472 case DEVICE_E_IOC_FAILED
:
1473 bfa_fsm_set_state(device
, bna_device_sm_failed
);
1477 bfa_sm_fault(device
->bna
, event
);
1482 bna_device_sm_ioc_ready_wait_entry(struct bna_device
*device
)
1485 * Do not call bfa_ioc_enable() here. It must be called in the
1486 * previous state due to failed -> ioc_ready_wait transition.
1491 bna_device_sm_ioc_ready_wait(struct bna_device
*device
,
1492 enum bna_device_event event
)
1495 case DEVICE_E_DISABLE
:
1496 if (device
->ready_cbfn
)
1497 device
->ready_cbfn(device
->ready_cbarg
,
1499 device
->ready_cbfn
= NULL
;
1500 device
->ready_cbarg
= NULL
;
1501 bfa_fsm_set_state(device
, bna_device_sm_ioc_disable_wait
);
1504 case DEVICE_E_IOC_READY
:
1505 bfa_fsm_set_state(device
, bna_device_sm_ready
);
1508 case DEVICE_E_IOC_FAILED
:
1509 bfa_fsm_set_state(device
, bna_device_sm_failed
);
1512 case DEVICE_E_IOC_RESET
:
1513 enable_mbox_intr(device
);
1517 bfa_sm_fault(device
->bna
, event
);
1522 bna_device_sm_ready_entry(struct bna_device
*device
)
1524 bna_mbox_mod_start(&device
->bna
->mbox_mod
);
1525 bna_port_start(&device
->bna
->port
);
1527 if (device
->ready_cbfn
)
1528 device
->ready_cbfn(device
->ready_cbarg
,
1530 device
->ready_cbfn
= NULL
;
1531 device
->ready_cbarg
= NULL
;
1535 bna_device_sm_ready(struct bna_device
*device
, enum bna_device_event event
)
1538 case DEVICE_E_DISABLE
:
1539 bfa_fsm_set_state(device
, bna_device_sm_port_stop_wait
);
1542 case DEVICE_E_IOC_FAILED
:
1543 bfa_fsm_set_state(device
, bna_device_sm_failed
);
1547 bfa_sm_fault(device
->bna
, event
);
1552 bna_device_sm_port_stop_wait_entry(struct bna_device
*device
)
1554 bna_port_stop(&device
->bna
->port
);
1558 bna_device_sm_port_stop_wait(struct bna_device
*device
,
1559 enum bna_device_event event
)
1562 case DEVICE_E_PORT_STOPPED
:
1563 bna_mbox_mod_stop(&device
->bna
->mbox_mod
);
1564 bfa_fsm_set_state(device
, bna_device_sm_ioc_disable_wait
);
1567 case DEVICE_E_IOC_FAILED
:
1568 disable_mbox_intr(device
);
1569 bna_port_fail(&device
->bna
->port
);
1573 bfa_sm_fault(device
->bna
, event
);
1578 bna_device_sm_ioc_disable_wait_entry(struct bna_device
*device
)
1580 bfa_nw_ioc_disable(&device
->ioc
);
1584 bna_device_sm_ioc_disable_wait(struct bna_device
*device
,
1585 enum bna_device_event event
)
1588 case DEVICE_E_IOC_DISABLED
:
1589 disable_mbox_intr(device
);
1590 bfa_fsm_set_state(device
, bna_device_sm_stopped
);
1594 bfa_sm_fault(device
->bna
, event
);
1599 bna_device_sm_failed_entry(struct bna_device
*device
)
1601 disable_mbox_intr(device
);
1602 bna_port_fail(&device
->bna
->port
);
1603 bna_mbox_mod_stop(&device
->bna
->mbox_mod
);
1605 if (device
->ready_cbfn
)
1606 device
->ready_cbfn(device
->ready_cbarg
,
1608 device
->ready_cbfn
= NULL
;
1609 device
->ready_cbarg
= NULL
;
1613 bna_device_sm_failed(struct bna_device
*device
,
1614 enum bna_device_event event
)
1617 case DEVICE_E_DISABLE
:
1618 bfa_fsm_set_state(device
, bna_device_sm_ioc_disable_wait
);
1621 case DEVICE_E_IOC_RESET
:
1622 enable_mbox_intr(device
);
1623 bfa_fsm_set_state(device
, bna_device_sm_ioc_ready_wait
);
1627 bfa_sm_fault(device
->bna
, event
);
1631 /* IOC callback functions */
1634 bna_device_cb_iocll_ready(void *dev
, enum bfa_status error
)
1636 struct bna_device
*device
= (struct bna_device
*)dev
;
1639 bfa_fsm_send_event(device
, DEVICE_E_IOC_FAILED
);
1641 bfa_fsm_send_event(device
, DEVICE_E_IOC_READY
);
1645 bna_device_cb_iocll_disabled(void *dev
)
1647 struct bna_device
*device
= (struct bna_device
*)dev
;
1649 bfa_fsm_send_event(device
, DEVICE_E_IOC_DISABLED
);
1653 bna_device_cb_iocll_failed(void *dev
)
1655 struct bna_device
*device
= (struct bna_device
*)dev
;
1657 bfa_fsm_send_event(device
, DEVICE_E_IOC_FAILED
);
1661 bna_device_cb_iocll_reset(void *dev
)
1663 struct bna_device
*device
= (struct bna_device
*)dev
;
1665 bfa_fsm_send_event(device
, DEVICE_E_IOC_RESET
);
1668 static struct bfa_ioc_cbfn bfa_iocll_cbfn
= {
1669 bna_device_cb_iocll_ready
,
1670 bna_device_cb_iocll_disabled
,
1671 bna_device_cb_iocll_failed
,
1672 bna_device_cb_iocll_reset
1677 bna_adv_device_init(struct bna_device
*device
, struct bna
*bna
,
1678 struct bna_res_info
*res_info
)
1685 kva
= res_info
[BNA_RES_MEM_T_FWTRC
].res_u
.mem_info
.mdl
[0].kva
;
1688 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
1692 &res_info
[BNA_RES_MEM_T_COM
].res_u
.mem_info
.mdl
[0].dma
, dma
);
1693 kva
= res_info
[BNA_RES_MEM_T_COM
].res_u
.mem_info
.mdl
[0].kva
;
1695 bfa_nw_cee_attach(&bna
->cee
, &device
->ioc
, bna
);
1696 bfa_nw_cee_mem_claim(&bna
->cee
, kva
, dma
);
1697 kva
+= bfa_nw_cee_meminfo();
1698 dma
+= bfa_nw_cee_meminfo();
1703 bna_device_init(struct bna_device
*device
, struct bna
*bna
,
1704 struct bna_res_info
*res_info
)
1711 * Attach IOC and claim:
1712 * 1. DMA memory for IOC attributes
1713 * 2. Kernel memory for FW trace
1715 bfa_nw_ioc_attach(&device
->ioc
, device
, &bfa_iocll_cbfn
);
1716 bfa_nw_ioc_pci_init(&device
->ioc
, &bna
->pcidev
, BFI_MC_LL
);
1719 &res_info
[BNA_RES_MEM_T_ATTR
].res_u
.mem_info
.mdl
[0].dma
, dma
);
1720 bfa_nw_ioc_mem_claim(&device
->ioc
,
1721 res_info
[BNA_RES_MEM_T_ATTR
].res_u
.mem_info
.mdl
[0].kva
,
1724 bna_adv_device_init(device
, bna
, res_info
);
1726 * Initialize mbox_mod only after IOC, so that mbox handler
1727 * registration goes through
1730 res_info
[BNA_RES_INTR_T_MBOX
].res_u
.intr_info
.intr_type
;
1732 res_info
[BNA_RES_INTR_T_MBOX
].res_u
.intr_info
.idl
[0].vector
;
1733 bna_mbox_mod_init(&bna
->mbox_mod
, bna
);
1735 device
->ready_cbfn
= device
->stop_cbfn
= NULL
;
1736 device
->ready_cbarg
= device
->stop_cbarg
= NULL
;
1738 bfa_fsm_set_state(device
, bna_device_sm_stopped
);
1742 bna_device_uninit(struct bna_device
*device
)
1744 bna_mbox_mod_uninit(&device
->bna
->mbox_mod
);
1746 bfa_nw_ioc_detach(&device
->ioc
);
1752 bna_device_cb_port_stopped(void *arg
, enum bna_cb_status status
)
1754 struct bna_device
*device
= (struct bna_device
*)arg
;
1756 bfa_fsm_send_event(device
, DEVICE_E_PORT_STOPPED
);
1760 bna_device_status_get(struct bna_device
*device
)
1762 return device
->fsm
== (bfa_fsm_t
)bna_device_sm_ready
;
1766 bna_device_enable(struct bna_device
*device
)
1768 if (device
->fsm
!= (bfa_fsm_t
)bna_device_sm_stopped
) {
1769 bnad_cb_device_enabled(device
->bna
->bnad
, BNA_CB_BUSY
);
1773 device
->ready_cbfn
= bnad_cb_device_enabled
;
1774 device
->ready_cbarg
= device
->bna
->bnad
;
1776 bfa_fsm_send_event(device
, DEVICE_E_ENABLE
);
1780 bna_device_disable(struct bna_device
*device
, enum bna_cleanup_type type
)
1782 if (type
== BNA_SOFT_CLEANUP
) {
1783 bnad_cb_device_disabled(device
->bna
->bnad
, BNA_CB_SUCCESS
);
1787 device
->stop_cbfn
= bnad_cb_device_disabled
;
1788 device
->stop_cbarg
= device
->bna
->bnad
;
1790 bfa_fsm_send_event(device
, DEVICE_E_DISABLE
);
1794 bna_device_state_get(struct bna_device
*device
)
1796 return bfa_sm_to_state(device_sm_table
, device
->fsm
);
1799 const u32 bna_napi_dim_vector
[BNA_LOAD_T_MAX
][BNA_BIAS_T_MAX
] = {
1813 bna_adv_res_req(struct bna_res_info
*res_info
)
1815 /* DMA memory for COMMON_MODULE */
1816 res_info
[BNA_RES_MEM_T_COM
].res_type
= BNA_RES_T_MEM
;
1817 res_info
[BNA_RES_MEM_T_COM
].res_u
.mem_info
.mem_type
= BNA_MEM_T_DMA
;
1818 res_info
[BNA_RES_MEM_T_COM
].res_u
.mem_info
.num
= 1;
1819 res_info
[BNA_RES_MEM_T_COM
].res_u
.mem_info
.len
= ALIGN(
1820 bfa_nw_cee_meminfo(), PAGE_SIZE
);
1822 /* Virtual memory for retreiving fw_trc */
1823 res_info
[BNA_RES_MEM_T_FWTRC
].res_type
= BNA_RES_T_MEM
;
1824 res_info
[BNA_RES_MEM_T_FWTRC
].res_u
.mem_info
.mem_type
= BNA_MEM_T_KVA
;
1825 res_info
[BNA_RES_MEM_T_FWTRC
].res_u
.mem_info
.num
= 0;
1826 res_info
[BNA_RES_MEM_T_FWTRC
].res_u
.mem_info
.len
= 0;
1828 /* DMA memory for retreiving stats */
1829 res_info
[BNA_RES_MEM_T_STATS
].res_type
= BNA_RES_T_MEM
;
1830 res_info
[BNA_RES_MEM_T_STATS
].res_u
.mem_info
.mem_type
= BNA_MEM_T_DMA
;
1831 res_info
[BNA_RES_MEM_T_STATS
].res_u
.mem_info
.num
= 1;
1832 res_info
[BNA_RES_MEM_T_STATS
].res_u
.mem_info
.len
=
1833 ALIGN(BFI_HW_STATS_SIZE
, PAGE_SIZE
);
1835 /* Virtual memory for soft stats */
1836 res_info
[BNA_RES_MEM_T_SWSTATS
].res_type
= BNA_RES_T_MEM
;
1837 res_info
[BNA_RES_MEM_T_SWSTATS
].res_u
.mem_info
.mem_type
= BNA_MEM_T_KVA
;
1838 res_info
[BNA_RES_MEM_T_SWSTATS
].res_u
.mem_info
.num
= 1;
1839 res_info
[BNA_RES_MEM_T_SWSTATS
].res_u
.mem_info
.len
=
1840 sizeof(struct bna_sw_stats
);
1844 bna_sw_stats_get(struct bna
*bna
, struct bna_sw_stats
*sw_stats
)
1847 struct bna_txq
*txq
;
1849 struct bna_rxp
*rxp
;
1850 struct list_head
*qe
;
1851 struct list_head
*txq_qe
;
1852 struct list_head
*rxp_qe
;
1853 struct list_head
*mac_qe
;
1856 sw_stats
->device_state
= bna_device_state_get(&bna
->device
);
1857 sw_stats
->port_state
= bna_port_state_get(&bna
->port
);
1858 sw_stats
->port_flags
= bna
->port
.flags
;
1859 sw_stats
->llport_state
= bna_llport_state_get(&bna
->port
.llport
);
1860 sw_stats
->priority
= bna
->port
.priority
;
1863 list_for_each(qe
, &bna
->tx_mod
.tx_active_q
) {
1864 tx
= (struct bna_tx
*)qe
;
1865 sw_stats
->tx_stats
[i
].tx_state
= bna_tx_state_get(tx
);
1866 sw_stats
->tx_stats
[i
].tx_flags
= tx
->flags
;
1868 sw_stats
->tx_stats
[i
].num_txqs
= 0;
1869 sw_stats
->tx_stats
[i
].txq_bmap
[0] = 0;
1870 sw_stats
->tx_stats
[i
].txq_bmap
[1] = 0;
1871 list_for_each(txq_qe
, &tx
->txq_q
) {
1872 txq
= (struct bna_txq
*)txq_qe
;
1873 if (txq
->txq_id
< 32)
1874 sw_stats
->tx_stats
[i
].txq_bmap
[0] |=
1875 ((u32
)1 << txq
->txq_id
);
1877 sw_stats
->tx_stats
[i
].txq_bmap
[1] |=
1879 1 << (txq
->txq_id
- 32));
1880 sw_stats
->tx_stats
[i
].num_txqs
++;
1883 sw_stats
->tx_stats
[i
].txf_id
= tx
->txf
.txf_id
;
1887 sw_stats
->num_active_tx
= i
;
1890 list_for_each(qe
, &bna
->rx_mod
.rx_active_q
) {
1891 rx
= (struct bna_rx
*)qe
;
1892 sw_stats
->rx_stats
[i
].rx_state
= bna_rx_state_get(rx
);
1893 sw_stats
->rx_stats
[i
].rx_flags
= rx
->rx_flags
;
1895 sw_stats
->rx_stats
[i
].num_rxps
= 0;
1896 sw_stats
->rx_stats
[i
].num_rxqs
= 0;
1897 sw_stats
->rx_stats
[i
].rxq_bmap
[0] = 0;
1898 sw_stats
->rx_stats
[i
].rxq_bmap
[1] = 0;
1899 sw_stats
->rx_stats
[i
].cq_bmap
[0] = 0;
1900 sw_stats
->rx_stats
[i
].cq_bmap
[1] = 0;
1901 list_for_each(rxp_qe
, &rx
->rxp_q
) {
1902 rxp
= (struct bna_rxp
*)rxp_qe
;
1904 sw_stats
->rx_stats
[i
].num_rxqs
+= 1;
1906 if (rxp
->type
== BNA_RXP_SINGLE
) {
1907 if (rxp
->rxq
.single
.only
->rxq_id
< 32) {
1908 sw_stats
->rx_stats
[i
].rxq_bmap
[0] |=
1910 rxp
->rxq
.single
.only
->rxq_id
);
1912 sw_stats
->rx_stats
[i
].rxq_bmap
[1] |=
1914 (rxp
->rxq
.single
.only
->rxq_id
- 32));
1917 if (rxp
->rxq
.slr
.large
->rxq_id
< 32) {
1918 sw_stats
->rx_stats
[i
].rxq_bmap
[0] |=
1920 rxp
->rxq
.slr
.large
->rxq_id
);
1922 sw_stats
->rx_stats
[i
].rxq_bmap
[1] |=
1924 (rxp
->rxq
.slr
.large
->rxq_id
- 32));
1927 if (rxp
->rxq
.slr
.small
->rxq_id
< 32) {
1928 sw_stats
->rx_stats
[i
].rxq_bmap
[0] |=
1930 rxp
->rxq
.slr
.small
->rxq_id
);
1932 sw_stats
->rx_stats
[i
].rxq_bmap
[1] |=
1934 (rxp
->rxq
.slr
.small
->rxq_id
- 32));
1936 sw_stats
->rx_stats
[i
].num_rxqs
+= 1;
1939 if (rxp
->cq
.cq_id
< 32)
1940 sw_stats
->rx_stats
[i
].cq_bmap
[0] |=
1941 (1 << rxp
->cq
.cq_id
);
1943 sw_stats
->rx_stats
[i
].cq_bmap
[1] |=
1944 (1 << (rxp
->cq
.cq_id
- 32));
1946 sw_stats
->rx_stats
[i
].num_rxps
++;
1949 sw_stats
->rx_stats
[i
].rxf_id
= rx
->rxf
.rxf_id
;
1950 sw_stats
->rx_stats
[i
].rxf_state
= bna_rxf_state_get(&rx
->rxf
);
1951 sw_stats
->rx_stats
[i
].rxf_oper_state
= rx
->rxf
.rxf_oper_state
;
1953 sw_stats
->rx_stats
[i
].num_active_ucast
= 0;
1954 if (rx
->rxf
.ucast_active_mac
)
1955 sw_stats
->rx_stats
[i
].num_active_ucast
++;
1956 list_for_each(mac_qe
, &rx
->rxf
.ucast_active_q
)
1957 sw_stats
->rx_stats
[i
].num_active_ucast
++;
1959 sw_stats
->rx_stats
[i
].num_active_mcast
= 0;
1960 list_for_each(mac_qe
, &rx
->rxf
.mcast_active_q
)
1961 sw_stats
->rx_stats
[i
].num_active_mcast
++;
1963 sw_stats
->rx_stats
[i
].rxmode_active
= rx
->rxf
.rxmode_active
;
1964 sw_stats
->rx_stats
[i
].vlan_filter_status
=
1965 rx
->rxf
.vlan_filter_status
;
1966 memcpy(sw_stats
->rx_stats
[i
].vlan_filter_table
,
1967 rx
->rxf
.vlan_filter_table
,
1968 sizeof(u32
) * ((BFI_MAX_VLAN
+ 1) / 32));
1970 sw_stats
->rx_stats
[i
].rss_status
= rx
->rxf
.rss_status
;
1971 sw_stats
->rx_stats
[i
].hds_status
= rx
->rxf
.hds_status
;
1975 sw_stats
->num_active_rx
= i
;
1979 bna_fw_cb_stats_get(void *arg
, int status
)
1981 struct bna
*bna
= (struct bna
*)arg
;
1984 int rxf_count
, txf_count
;
1985 u64 rxf_bmap
, txf_bmap
;
1987 bfa_q_qe_init(&bna
->mbox_qe
.qe
);
1990 p_stats
= (u64
*)bna
->stats
.hw_stats
;
1991 count
= sizeof(struct bfi_ll_stats
) / sizeof(u64
);
1992 for (i
= 0; i
< count
; i
++)
1993 p_stats
[i
] = cpu_to_be64(p_stats
[i
]);
1996 rxf_bmap
= (u64
)bna
->stats
.rxf_bmap
[0] |
1997 ((u64
)bna
->stats
.rxf_bmap
[1] << 32);
1998 for (i
= 0; i
< BFI_LL_RXF_ID_MAX
; i
++)
1999 if (rxf_bmap
& ((u64
)1 << i
))
2003 txf_bmap
= (u64
)bna
->stats
.txf_bmap
[0] |
2004 ((u64
)bna
->stats
.txf_bmap
[1] << 32);
2005 for (i
= 0; i
< BFI_LL_TXF_ID_MAX
; i
++)
2006 if (txf_bmap
& ((u64
)1 << i
))
2009 p_stats
= (u64
*)&bna
->stats
.hw_stats
->rxf_stats
[0] +
2010 ((rxf_count
* sizeof(struct bfi_ll_stats_rxf
) +
2011 txf_count
* sizeof(struct bfi_ll_stats_txf
))/
2014 /* Populate the TXF stats from the firmware DMAed copy */
2015 for (i
= (BFI_LL_TXF_ID_MAX
- 1); i
>= 0; i
--)
2016 if (txf_bmap
& ((u64
)1 << i
)) {
2017 p_stats
-= sizeof(struct bfi_ll_stats_txf
)/
2019 memcpy(&bna
->stats
.hw_stats
->txf_stats
[i
],
2021 sizeof(struct bfi_ll_stats_txf
));
2024 /* Populate the RXF stats from the firmware DMAed copy */
2025 for (i
= (BFI_LL_RXF_ID_MAX
- 1); i
>= 0; i
--)
2026 if (rxf_bmap
& ((u64
)1 << i
)) {
2027 p_stats
-= sizeof(struct bfi_ll_stats_rxf
)/
2029 memcpy(&bna
->stats
.hw_stats
->rxf_stats
[i
],
2031 sizeof(struct bfi_ll_stats_rxf
));
2034 bna_sw_stats_get(bna
, bna
->stats
.sw_stats
);
2035 bnad_cb_stats_get(bna
->bnad
, BNA_CB_SUCCESS
, &bna
->stats
);
2037 bnad_cb_stats_get(bna
->bnad
, BNA_CB_FAIL
, &bna
->stats
);
2041 bna_fw_stats_get(struct bna
*bna
)
2043 struct bfi_ll_stats_req ll_req
;
2045 bfi_h2i_set(ll_req
.mh
, BFI_MC_LL
, BFI_LL_H2I_STATS_GET_REQ
, 0);
2046 ll_req
.stats_mask
= htons(BFI_LL_STATS_ALL
);
2048 ll_req
.rxf_id_mask
[0] = htonl(bna
->rx_mod
.rxf_bmap
[0]);
2049 ll_req
.rxf_id_mask
[1] = htonl(bna
->rx_mod
.rxf_bmap
[1]);
2050 ll_req
.txf_id_mask
[0] = htonl(bna
->tx_mod
.txf_bmap
[0]);
2051 ll_req
.txf_id_mask
[1] = htonl(bna
->tx_mod
.txf_bmap
[1]);
2053 ll_req
.host_buffer
.a32
.addr_hi
= bna
->hw_stats_dma
.msb
;
2054 ll_req
.host_buffer
.a32
.addr_lo
= bna
->hw_stats_dma
.lsb
;
2056 bna_mbox_qe_fill(&bna
->mbox_qe
, &ll_req
, sizeof(ll_req
),
2057 bna_fw_cb_stats_get
, bna
);
2058 bna_mbox_send(bna
, &bna
->mbox_qe
);
2060 bna
->stats
.rxf_bmap
[0] = bna
->rx_mod
.rxf_bmap
[0];
2061 bna
->stats
.rxf_bmap
[1] = bna
->rx_mod
.rxf_bmap
[1];
2062 bna
->stats
.txf_bmap
[0] = bna
->tx_mod
.txf_bmap
[0];
2063 bna
->stats
.txf_bmap
[1] = bna
->tx_mod
.txf_bmap
[1];
2067 bna_stats_get(struct bna
*bna
)
2069 if (bna_device_status_get(&bna
->device
))
2070 bna_fw_stats_get(bna
);
2072 bnad_cb_stats_get(bna
->bnad
, BNA_CB_FAIL
, &bna
->stats
);
2077 bna_ib_coalescing_timeo_set(struct bna_ib
*ib
, u8 coalescing_timeo
)
2079 ib
->ib_config
.coalescing_timeo
= coalescing_timeo
;
2081 if (ib
->start_count
)
2082 ib
->door_bell
.doorbell_ack
= BNA_DOORBELL_IB_INT_ACK(
2083 (u32
)ib
->ib_config
.coalescing_timeo
, 0);
2088 bna_rxf_adv_init(struct bna_rxf
*rxf
,
2090 struct bna_rx_config
*q_config
)
2092 switch (q_config
->rxp_type
) {
2093 case BNA_RXP_SINGLE
:
2097 rxf
->ctrl_flags
|= BNA_RXF_CF_SM_LG_RXQ
;
2100 rxf
->hds_cfg
.hdr_type
= q_config
->hds_config
.hdr_type
;
2101 rxf
->hds_cfg
.header_size
=
2102 q_config
->hds_config
.header_size
;
2103 rxf
->forced_offset
= 0;
2109 if (q_config
->rss_status
== BNA_STATUS_T_ENABLED
) {
2110 rxf
->ctrl_flags
|= BNA_RXF_CF_RSS_ENABLE
;
2111 rxf
->rss_cfg
.hash_type
= q_config
->rss_config
.hash_type
;
2112 rxf
->rss_cfg
.hash_mask
= q_config
->rss_config
.hash_mask
;
2113 memcpy(&rxf
->rss_cfg
.toeplitz_hash_key
[0],
2114 &q_config
->rss_config
.toeplitz_hash_key
[0],
2115 sizeof(rxf
->rss_cfg
.toeplitz_hash_key
));
2120 rxf_fltr_mbox_cmd(struct bna_rxf
*rxf
, u8 cmd
, enum bna_status status
)
2122 struct bfi_ll_rxf_req req
;
2124 bfi_h2i_set(req
.mh
, BFI_MC_LL
, cmd
, 0);
2126 req
.rxf_id
= rxf
->rxf_id
;
2127 req
.enable
= status
;
2129 bna_mbox_qe_fill(&rxf
->mbox_qe
, &req
, sizeof(req
),
2130 rxf_cb_cam_fltr_mbox_cmd
, rxf
);
2132 bna_mbox_send(rxf
->rx
->bna
, &rxf
->mbox_qe
);
2136 rxf_process_packet_filter_ucast(struct bna_rxf
*rxf
)
2138 struct bna_mac
*mac
= NULL
;
2139 struct list_head
*qe
;
2141 /* Add additional MAC entries */
2142 if (!list_empty(&rxf
->ucast_pending_add_q
)) {
2143 bfa_q_deq(&rxf
->ucast_pending_add_q
, &qe
);
2145 mac
= (struct bna_mac
*)qe
;
2146 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_UCAST_ADD_REQ
, mac
);
2147 list_add_tail(&mac
->qe
, &rxf
->ucast_active_q
);
2151 /* Delete MAC addresses previousely added */
2152 if (!list_empty(&rxf
->ucast_pending_del_q
)) {
2153 bfa_q_deq(&rxf
->ucast_pending_del_q
, &qe
);
2155 mac
= (struct bna_mac
*)qe
;
2156 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_UCAST_DEL_REQ
, mac
);
2157 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
2165 rxf_process_packet_filter_promisc(struct bna_rxf
*rxf
)
2167 struct bna
*bna
= rxf
->rx
->bna
;
2169 /* Enable/disable promiscuous mode */
2170 if (is_promisc_enable(rxf
->rxmode_pending
,
2171 rxf
->rxmode_pending_bitmask
)) {
2172 /* move promisc configuration from pending -> active */
2173 promisc_inactive(rxf
->rxmode_pending
,
2174 rxf
->rxmode_pending_bitmask
);
2175 rxf
->rxmode_active
|= BNA_RXMODE_PROMISC
;
2177 /* Disable VLAN filter to allow all VLANs */
2178 __rxf_vlan_filter_set(rxf
, BNA_STATUS_T_DISABLED
);
2179 rxf_fltr_mbox_cmd(rxf
, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
,
2180 BNA_STATUS_T_ENABLED
);
2182 } else if (is_promisc_disable(rxf
->rxmode_pending
,
2183 rxf
->rxmode_pending_bitmask
)) {
2184 /* move promisc configuration from pending -> active */
2185 promisc_inactive(rxf
->rxmode_pending
,
2186 rxf
->rxmode_pending_bitmask
);
2187 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
2188 bna
->rxf_promisc_id
= BFI_MAX_RXF
;
2190 /* Revert VLAN filter */
2191 __rxf_vlan_filter_set(rxf
, rxf
->vlan_filter_status
);
2192 rxf_fltr_mbox_cmd(rxf
, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
,
2193 BNA_STATUS_T_DISABLED
);
2201 rxf_process_packet_filter_allmulti(struct bna_rxf
*rxf
)
2203 /* Enable/disable allmulti mode */
2204 if (is_allmulti_enable(rxf
->rxmode_pending
,
2205 rxf
->rxmode_pending_bitmask
)) {
2206 /* move allmulti configuration from pending -> active */
2207 allmulti_inactive(rxf
->rxmode_pending
,
2208 rxf
->rxmode_pending_bitmask
);
2209 rxf
->rxmode_active
|= BNA_RXMODE_ALLMULTI
;
2211 rxf_fltr_mbox_cmd(rxf
, BFI_LL_H2I_MAC_MCAST_FILTER_REQ
,
2212 BNA_STATUS_T_ENABLED
);
2214 } else if (is_allmulti_disable(rxf
->rxmode_pending
,
2215 rxf
->rxmode_pending_bitmask
)) {
2216 /* move allmulti configuration from pending -> active */
2217 allmulti_inactive(rxf
->rxmode_pending
,
2218 rxf
->rxmode_pending_bitmask
);
2219 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
2221 rxf_fltr_mbox_cmd(rxf
, BFI_LL_H2I_MAC_MCAST_FILTER_REQ
,
2222 BNA_STATUS_T_DISABLED
);
2230 rxf_clear_packet_filter_ucast(struct bna_rxf
*rxf
)
2232 struct bna_mac
*mac
= NULL
;
2233 struct list_head
*qe
;
2235 /* 1. delete pending ucast entries */
2236 if (!list_empty(&rxf
->ucast_pending_del_q
)) {
2237 bfa_q_deq(&rxf
->ucast_pending_del_q
, &qe
);
2239 mac
= (struct bna_mac
*)qe
;
2240 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_UCAST_DEL_REQ
, mac
);
2241 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
2245 /* 2. clear active ucast entries; move them to pending_add_q */
2246 if (!list_empty(&rxf
->ucast_active_q
)) {
2247 bfa_q_deq(&rxf
->ucast_active_q
, &qe
);
2249 mac
= (struct bna_mac
*)qe
;
2250 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_UCAST_DEL_REQ
, mac
);
2251 list_add_tail(&mac
->qe
, &rxf
->ucast_pending_add_q
);
2259 rxf_clear_packet_filter_promisc(struct bna_rxf
*rxf
)
2261 struct bna
*bna
= rxf
->rx
->bna
;
2263 /* 6. Execute pending promisc mode disable command */
2264 if (is_promisc_disable(rxf
->rxmode_pending
,
2265 rxf
->rxmode_pending_bitmask
)) {
2266 /* move promisc configuration from pending -> active */
2267 promisc_inactive(rxf
->rxmode_pending
,
2268 rxf
->rxmode_pending_bitmask
);
2269 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
2270 bna
->rxf_promisc_id
= BFI_MAX_RXF
;
2272 /* Revert VLAN filter */
2273 __rxf_vlan_filter_set(rxf
, rxf
->vlan_filter_status
);
2274 rxf_fltr_mbox_cmd(rxf
, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
,
2275 BNA_STATUS_T_DISABLED
);
2279 /* 7. Clear active promisc mode; move it to pending enable */
2280 if (rxf
->rxmode_active
& BNA_RXMODE_PROMISC
) {
2281 /* move promisc configuration from active -> pending */
2282 promisc_enable(rxf
->rxmode_pending
,
2283 rxf
->rxmode_pending_bitmask
);
2284 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
2286 /* Revert VLAN filter */
2287 __rxf_vlan_filter_set(rxf
, rxf
->vlan_filter_status
);
2288 rxf_fltr_mbox_cmd(rxf
, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ
,
2289 BNA_STATUS_T_DISABLED
);
2297 rxf_clear_packet_filter_allmulti(struct bna_rxf
*rxf
)
2299 /* 10. Execute pending allmulti mode disable command */
2300 if (is_allmulti_disable(rxf
->rxmode_pending
,
2301 rxf
->rxmode_pending_bitmask
)) {
2302 /* move allmulti configuration from pending -> active */
2303 allmulti_inactive(rxf
->rxmode_pending
,
2304 rxf
->rxmode_pending_bitmask
);
2305 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
2306 rxf_fltr_mbox_cmd(rxf
, BFI_LL_H2I_MAC_MCAST_FILTER_REQ
,
2307 BNA_STATUS_T_DISABLED
);
2311 /* 11. Clear active allmulti mode; move it to pending enable */
2312 if (rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
) {
2313 /* move allmulti configuration from active -> pending */
2314 allmulti_enable(rxf
->rxmode_pending
,
2315 rxf
->rxmode_pending_bitmask
);
2316 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
2317 rxf_fltr_mbox_cmd(rxf
, BFI_LL_H2I_MAC_MCAST_FILTER_REQ
,
2318 BNA_STATUS_T_DISABLED
);
2326 rxf_reset_packet_filter_ucast(struct bna_rxf
*rxf
)
2328 struct list_head
*qe
;
2329 struct bna_mac
*mac
;
2331 /* 1. Move active ucast entries to pending_add_q */
2332 while (!list_empty(&rxf
->ucast_active_q
)) {
2333 bfa_q_deq(&rxf
->ucast_active_q
, &qe
);
2335 list_add_tail(qe
, &rxf
->ucast_pending_add_q
);
2338 /* 2. Throw away delete pending ucast entries */
2339 while (!list_empty(&rxf
->ucast_pending_del_q
)) {
2340 bfa_q_deq(&rxf
->ucast_pending_del_q
, &qe
);
2342 mac
= (struct bna_mac
*)qe
;
2343 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
2348 rxf_reset_packet_filter_promisc(struct bna_rxf
*rxf
)
2350 struct bna
*bna
= rxf
->rx
->bna
;
2352 /* 6. Clear pending promisc mode disable */
2353 if (is_promisc_disable(rxf
->rxmode_pending
,
2354 rxf
->rxmode_pending_bitmask
)) {
2355 promisc_inactive(rxf
->rxmode_pending
,
2356 rxf
->rxmode_pending_bitmask
);
2357 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
2358 bna
->rxf_promisc_id
= BFI_MAX_RXF
;
2361 /* 7. Move promisc mode config from active -> pending */
2362 if (rxf
->rxmode_active
& BNA_RXMODE_PROMISC
) {
2363 promisc_enable(rxf
->rxmode_pending
,
2364 rxf
->rxmode_pending_bitmask
);
2365 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
2371 rxf_reset_packet_filter_allmulti(struct bna_rxf
*rxf
)
2373 /* 10. Clear pending allmulti mode disable */
2374 if (is_allmulti_disable(rxf
->rxmode_pending
,
2375 rxf
->rxmode_pending_bitmask
)) {
2376 allmulti_inactive(rxf
->rxmode_pending
,
2377 rxf
->rxmode_pending_bitmask
);
2378 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
2381 /* 11. Move allmulti mode config from active -> pending */
2382 if (rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
) {
2383 allmulti_enable(rxf
->rxmode_pending
,
2384 rxf
->rxmode_pending_bitmask
);
2385 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
2390 * Should only be called by bna_rxf_mode_set.
2391 * Helps deciding if h/w configuration is needed or not.
2394 * 1 = need h/w change
2397 rxf_promisc_enable(struct bna_rxf
*rxf
)
2399 struct bna
*bna
= rxf
->rx
->bna
;
2402 /* There can not be any pending disable command */
2404 /* Do nothing if pending enable or already enabled */
2405 if (is_promisc_enable(rxf
->rxmode_pending
,
2406 rxf
->rxmode_pending_bitmask
) ||
2407 (rxf
->rxmode_active
& BNA_RXMODE_PROMISC
)) {
2408 /* Schedule enable */
2410 /* Promisc mode should not be active in the system */
2411 promisc_enable(rxf
->rxmode_pending
,
2412 rxf
->rxmode_pending_bitmask
);
2413 bna
->rxf_promisc_id
= rxf
->rxf_id
;
/**
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There can not be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Promisc mode should not be active */
		/* system promisc state should be pending */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		/* Remove the promisc state from the system */
		bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Promisc mode should be active in the system */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;

	/* Do nothing if already disabled */
	} else {
	}

	return ret;
}
/**
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	/* There can not be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
			(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Schedule enable */
	} else {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
/**
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	/* There can not be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Allmulti mode should not be active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
			(rx->bna->rxf_promisc_id != rxf->rxf_id))
			goto err_return;
		if (rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
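
/*
 * Illustrative caller sketch (not from this file; the real callers live in
 * the bnad code and may differ in names and locking details): a
 * ndo_set_rx_mode handler asking for promiscuous reception would typically
 * do something like
 *
 *	bna_rx_mode_set(rx, BNA_RXMODE_PROMISC, BNA_RXMODE_PROMISC, cbfn);
 *
 * new_mode supplies the desired values for the mode bits selected by
 * bitmask; bits outside bitmask are left untouched.  When no hardware
 * reconfiguration turns out to be necessary, cbfn is invoked right away
 * with BNA_CB_SUCCESS instead of being deferred to the RxF state machine.
 */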
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}
void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
	}
}
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}
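
/*
 * dim_vector is a BNA_LOAD_T_MAX x BNA_BIAS_T_MAX table of coalescing
 * timeouts, indexed by packet-rate load level and by small/large packet
 * bias.  A caller would hand in a table shaped like the following (the
 * numeric values here are invented for illustration only):
 *
 *	static const u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
 *		{12, 12}, {6, 10}, {5, 10}, {4, 8},
 *		{3, 6}, {3, 6}, {2, 4}, {1, 2},
 *	};
 *	bna_rx_dim_reconfig(bna, dim_vector);
 *
 * bna_rx_dim_update() below then picks one entry per CCB based on the
 * packet rate observed since the previous update.
 */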
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = BNA_BIAS_T_SMALL;
	else
		bias = BNA_BIAS_T_LARGE;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
}
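
/*
 * Worked example for the classification above (counts invented for
 * illustration): with small_pkt_cnt = 15000 and large_pkt_cnt = 20000 the
 * total rate is 35000, which falls below BNA_PKT_RATE_40K, so load becomes
 * BNA_LOAD_T_LOW_1.  Since 15000 is not greater than (20000 << 1), the
 * large-packet bias column is chosen, and the new timeout is read from
 * dim_vector[BNA_LOAD_T_LOW_1][BNA_BIAS_T_LARGE].
 */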
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
	}
}
struct bna_ritseg_pool_cfg {
	u32	pool_size;
	u32	pool_entry_size;
};
init_ritseg_pool(ritseg_pool_cfg);
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < BFI_MAX_UCMAC; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i = 0;

	list_for_each(qe, &ucam_mod->free_q)
		i++;

	ucam_mod->bna = NULL;
}
static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < BFI_MAX_MCMAC; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->bna = bna;
}
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i = 0;

	list_for_each(qe, &mcam_mod->free_q)
		i++;

	mcam_mod->bna = NULL;
}
static void
bna_rit_mod_init(struct bna_rit_mod *rit_mod,
		struct bna_res_info *res_info)
{
	int i;
	int j;
	int count;
	int offset;

	rit_mod->rit = (struct bna_rit_entry *)
		res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
	rit_mod->rit_segment = (struct bna_rit_segment *)
		res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;

	count = 0;
	offset = 0;
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
		for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
			bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
			rit_mod->rit_segment[count].max_rit_size =
					ritseg_pool_cfg[i].pool_entry_size;
			rit_mod->rit_segment[count].rit_offset = offset;
			rit_mod->rit_segment[count].rit =
					&rit_mod->rit[offset];
			list_add_tail(&rit_mod->rit_segment[count].qe,
				&rit_mod->rit_seg_pool[i]);
			count++;
			offset += ritseg_pool_cfg[i].pool_entry_size;
		}
	}
}
static void
bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
{
	struct bna_rit_segment *rit_segment;
	struct list_head *qe;
	int i;
	int j;

	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		j = 0;
		list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
			rit_segment = (struct bna_rit_segment *)qe;
			j++;
		}
	}
}
/* Called during probe(), before calling bna_init() */
void
bna_res_req(struct bna_res_info *res_info)
{
	bna_adv_res_req(res_info);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* DMA memory for index segment of an IB */
	res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
				BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;

	/* Virtual memory for IB objects - stored by IB module */
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
				BFI_MAX_IB * sizeof(struct bna_ib);

	/* Virtual memory for intr objects - stored by IB module */
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
				BFI_MAX_IB * sizeof(struct bna_intr);

	/* Virtual memory for idx_seg objects - stored by IB module */
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
			BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
				BFI_MAX_TXQ * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
				BFI_MAX_TXQ * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
				BFI_MAX_RXQ * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
				BFI_MAX_RXQ * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
				BFI_MAX_RXQ * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
				BFI_MAX_UCMAC * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
				BFI_MAX_MCMAC * sizeof(struct bna_mac);

	/* Virtual memory for RIT entries */
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
				BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);

	/* Virtual memory for RIT segment table */
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
								BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
			BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);

	/* Interrupt resource for mailbox interrupt */
	res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
								BNA_INTR_T_MSIX;
	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
}
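
/*
 * Each entry filled in above only describes a requirement; the allocation
 * itself is done by the caller before bna_init().  A simplified sketch of
 * the consuming side (the real loop lives in the bnad code and also honours
 * mem_info.num and multiple mdl[] chunks):
 *
 *	struct bna_mem_info *mi =
 *		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info;
 *	mi->mdl[0].kva = dma_alloc_coherent(dev, mi->len, &dma, GFP_KERNEL);
 *
 * where 'dev' and 'dma' come from the PCI probe code.  Entries whose
 * mem_type is BNA_MEM_T_KVA are plain kernel-virtual allocations instead
 * of DMA-coherent memory.
 */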
/* Called during probe() */
void
bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
		struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats = (struct bfi_ll_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
	bna->stats.sw_stats = (struct bna_sw_stats *)
		res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;

	bna->regs.page_addr = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].page_addr;
	bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].fn_int_status;
	bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].fn_int_mask;

	if (bna->pcidev.pci_func < 3)
		bna->port_num = 0;
	else
		bna->port_num = 1;

	/* Also initializes diag, cee, sfp, phy_port and mbox_mod */
	bna_device_init(&bna->device, bna, res_info);

	bna_port_init(&bna->port, bna);

	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ib_mod_init(&bna->ib_mod, bna, res_info);

	bna_rit_mod_init(&bna->rit_mod, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Mbox q element for posting stat request to f/w */
	bfa_q_qe_init(&bna->mbox_qe.qe);
}
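
/*
 * Probe-time ordering implied by the comments above (exact call sites are
 * in the bnad code):
 *
 *	bna_res_req(res_info);		query resource requirements
 *	... allocate everything described by res_info ...
 *	bna_init(bna, bnad, pcidev, res_info);
 *	...
 *	bna_uninit(bna);		on device removal
 *
 * bna_init() itself only wires pointers into the pre-allocated memory and
 * initializes the sub-modules; it performs no allocation of its own.
 */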
void
bna_uninit(struct bna *bna)
{
	bna_mcam_mod_uninit(&bna->mcam_mod);

	bna_ucam_mod_uninit(&bna->ucam_mod);

	bna_rit_mod_uninit(&bna->rit_mod);

	bna_ib_mod_uninit(&bna->ib_mod);

	bna_rx_mod_uninit(&bna->rx_mod);

	bna_tx_mod_uninit(&bna->tx_mod);

	bna_port_uninit(&bna->port);

	bna_device_uninit(&bna->device);

	bna->bnad = NULL;
}
struct bna_mac *
bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;

	if (list_empty(&ucam_mod->free_q))
		return NULL;

	bfa_q_deq(&ucam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}
void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}
struct bna_mac *
bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}
void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}
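
/*
 * The ucam/mcam helpers above implement a simple free list of preallocated
 * struct bna_mac entries (BFI_MAX_UCMAC / BFI_MAX_MCMAC of them, set up in
 * the mod_init routines).  Illustrative use, with error handling left to
 * the caller:
 *
 *	struct bna_mac *mac = bna_mcam_mod_mac_get(&bna->mcam_mod);
 *	if (mac == NULL)
 *		return;			free list exhausted
 *	... fill in the MAC address and program it through the RxF ...
 *	bna_mcam_mod_mac_put(&bna->mcam_mod, mac);
 */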
/**
 * Note: This should be called in the same locking context as the call to
 * bna_rit_mod_seg_get()
 */
int
bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
{
	int i;

	/* Select the pool for seg_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	if (i == BFI_RIT_SEG_TOTAL_POOLS)
		return 0;

	if (list_empty(&rit_mod->rit_seg_pool[i]))
		return 0;

	return 1;
}
struct bna_rit_segment *
bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
{
	struct bna_rit_segment *seg;
	struct list_head *qe;
	int i;

	/* Select the pool for seg_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	if (i == BFI_RIT_SEG_TOTAL_POOLS)
		return NULL;

	if (list_empty(&rit_mod->rit_seg_pool[i]))
		return NULL;

	bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
	seg = (struct bna_rit_segment *)qe;
	bfa_q_qe_init(&seg->qe);
	seg->rit_size = seg_size;

	return seg;
}
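
/*
 * bna_rit_mod_can_satisfy() and bna_rit_mod_seg_get() walk the same pool
 * selection logic, which is why the locking note above requires them to be
 * called in the same context: a caller may first check and then grab, e.g.
 * (illustrative only)
 *
 *	if (!bna_rit_mod_can_satisfy(&bna->rit_mod, size))
 *		return -ENOMEM;
 *	seg = bna_rit_mod_seg_get(&bna->rit_mod, size);
 *	...
 *	bna_rit_mod_seg_put(&bna->rit_mod, seg);
 *
 * A segment is always returned to the pool whose entry size matches its
 * max_rit_size, so it goes back to the pool it was originally carved from.
 */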
void
bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
			struct bna_rit_segment *seg)
{
	int i;

	/* Select the pool for seg->max_rit_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
}