/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */
static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
static void
bna_port_cb_link_up(struct bna_port *port, struct bfi_ll_aen *aen,
			int status)
{
	int i;
	u8 prio_map;

	port->llport.link_status = BNA_LINK_UP;
	if (aen->cee_linkup)
		port->llport.link_status = BNA_CEE_UP;

	/* Compute the priority */
	prio_map = aen->prio_map;
	if (prio_map) {
		for (i = 0; i < 8; i++) {
			if ((prio_map >> i) & 0x1)
				break;
		}
		port->priority = i;
	} else
		port->priority = 0;

	/* Dispatch events */
	bna_tx_mod_cee_link_status(&port->bna->tx_mod, aen->cee_linkup);
	bna_tx_mod_prio_changed(&port->bna->tx_mod, port->priority);
	port->link_cbfn(port->bna->bnad, port->llport.link_status);
}
static void
bna_port_cb_link_down(struct bna_port *port, int status)
{
	port->llport.link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	bna_tx_mod_cee_link_status(&port->bna->tx_mod, BNA_LINK_DOWN);
	port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
}
static int
llport_can_be_up(struct bna_llport *llport)
{
	int ready = 0;

	if (llport->type == BNA_PORT_T_REGULAR)
		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
			 (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
	else
		ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
			 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
			 !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
	return ready;
}

#define llport_is_up llport_can_be_up
enum bna_llport_event {
	LLPORT_E_START			= 1,
	LLPORT_E_STOP			= 2,
	LLPORT_E_FAIL			= 3,
	LLPORT_E_UP			= 4,
	LLPORT_E_DOWN			= 5,
	LLPORT_E_FWRESP_UP_OK		= 6,
	LLPORT_E_FWRESP_UP_FAIL		= 7,
	LLPORT_E_FWRESP_DOWN		= 8
};
static void
bna_llport_cb_port_enabled(struct bna_llport *llport)
{
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;

	if (llport_can_be_up(llport))
		bfa_fsm_send_event(llport, LLPORT_E_UP);
}
static void
bna_llport_cb_port_disabled(struct bna_llport *llport)
{
	int llport_up = llport_is_up(llport);

	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;

	if (llport_up)
		bfa_fsm_send_event(llport, LLPORT_E_DOWN);
}
static int
bna_is_aen(u8 msg_id)
{
	switch (msg_id) {
	case BFI_LL_I2H_LINK_DOWN_AEN:
	case BFI_LL_I2H_LINK_UP_AEN:
	case BFI_LL_I2H_PORT_ENABLE_AEN:
	case BFI_LL_I2H_PORT_DISABLE_AEN:
		return 1;

	default:
		return 0;
	}
}
static void
bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
{
	struct bfi_ll_aen *aen = (struct bfi_ll_aen *)(msg);

	switch (aen->mh.msg_id) {
	case BFI_LL_I2H_LINK_UP_AEN:
		bna_port_cb_link_up(&bna->port, aen, aen->reason);
		break;
	case BFI_LL_I2H_LINK_DOWN_AEN:
		bna_port_cb_link_down(&bna->port, aen->reason);
		break;
	case BFI_LL_I2H_PORT_ENABLE_AEN:
		bna_llport_cb_port_enabled(&bna->port.llport);
		break;
	case BFI_LL_I2H_PORT_DISABLE_AEN:
		bna_llport_cb_port_disabled(&bna->port.llport);
		break;
	default:
		break;
	}
}
void
bna_ll_isr(void *llarg, struct bfi_mbmsg *msg)
{
	struct bna *bna = (struct bna *)(llarg);
	struct bfi_ll_rsp *mb_rsp = (struct bfi_ll_rsp *)(msg);
	struct bfi_mhdr *cmd_h, *rsp_h;
	struct bna_mbox_qe *mb_qe = NULL;
	int to_post = 0;
	u8 aen = 0;
	char message[BNA_MESSAGE_SIZE];

	aen = bna_is_aen(mb_rsp->mh.msg_id);

	if (!aen) {
		mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
		cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
		rsp_h = (struct bfi_mhdr *)(&mb_rsp->mh);

		if ((BFA_I2HM(cmd_h->msg_id) == rsp_h->msg_id) &&
		    (cmd_h->mtag.i2htok == rsp_h->mtag.i2htok)) {
			/* Remove the request from posted_q, update state */
			list_del(&mb_qe->qe);
			bna->mbox_mod.msg_pending--;
			if (list_empty(&bna->mbox_mod.posted_q))
				bna->mbox_mod.state = BNA_MBOX_FREE;
			else
				to_post = 1;

			/* Dispatch the cbfn */
			if (mb_qe->cbfn)
				mb_qe->cbfn(mb_qe->cbarg, mb_rsp->error);

			/* Post the next entry, if needed */
			if (to_post) {
				mb_qe = bfa_q_first(&bna->mbox_mod.posted_q);
				bfa_nw_ioc_mbox_queue(&bna->device.ioc,
							&mb_qe->cmd);
			}
		} else {
			snprintf(message, BNA_MESSAGE_SIZE,
				    "No matching rsp for [%d:%d:%d]\n",
				    mb_rsp->mh.msg_class, mb_rsp->mh.msg_id,
				    mb_rsp->mh.mtag.i2htok);
			pr_info("%s", message);
		}

	} else
		bna_mbox_aen_callback(bna, msg);
}
void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	u32 init_halt;

	if (intr_status & __HALT_STATUS_BITS) {
		init_halt = readl(bna->device.ioc.ioc_regs.ll_halt);
		init_halt &= ~__FW_INIT_HALT_P;
		writel(init_halt, bna->device.ioc.ioc_regs.ll_halt);
	}

	bfa_nw_ioc_error_isr(&bna->device.ioc);
}
void
bna_mbox_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_ERR_INTR(intr_status)) {
		bna_err_handler(bna, intr_status);
		return;
	}
	if (BNA_IS_MBOX_INTR(intr_status))
		bfa_nw_ioc_mbox_isr(&bna->device.ioc);
}
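/*
 * Mailbox posting model (illustrative summary, not verbatim from the
 * hardware documentation): commands are wrapped in a struct bna_mbox_qe
 * and handed to bna_mbox_send().  Only one command sits in the IOC
 * mailbox at a time; the rest wait on mbox_mod.posted_q.  bna_ll_isr()
 * matches each firmware response against the head of posted_q,
 * dispatches the queued callback and then posts the next pending entry.
 * A typical caller in this file therefore looks roughly like:
 *
 *	bna_mbox_qe_fill(&obj->mbox_qe, &req, sizeof(req), my_cbfn, obj);
 *	bna_mbox_send(obj->bna, &obj->mbox_qe);
 *
 * where my_cbfn (a placeholder name) re-inits the queue element with
 * bfa_q_qe_init() before sending the follow-up FSM event.
 */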
void
bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe)
{
	struct bfi_mhdr *mh;

	mh = (struct bfi_mhdr *)(&mbox_qe->cmd.msg[0]);

	mh->mtag.i2htok = htons(bna->mbox_mod.msg_ctr);
	bna->mbox_mod.msg_ctr++;
	bna->mbox_mod.msg_pending++;
	if (bna->mbox_mod.state == BNA_MBOX_FREE) {
		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
		bfa_nw_ioc_mbox_queue(&bna->device.ioc, &mbox_qe->cmd);
		bna->mbox_mod.state = BNA_MBOX_POSTED;
	} else {
		list_add_tail(&mbox_qe->qe, &bna->mbox_mod.posted_q);
	}
}
static void
bna_mbox_flush_q(struct bna *bna, struct list_head *q)
{
	struct bna_mbox_qe *mb_qe = NULL;
	struct list_head *mb_q;
	void (*cbfn)(void *arg, int status);
	void *cbarg;

	mb_q = &bna->mbox_mod.posted_q;

	while (!list_empty(mb_q)) {
		bfa_q_deq(mb_q, &mb_qe);
		cbfn = mb_qe->cbfn;
		cbarg = mb_qe->cbarg;
		bfa_q_qe_init(mb_qe);
		bna->mbox_mod.msg_pending--;

		if (cbfn)
			cbfn(cbarg, BNA_CB_NOT_EXEC);
	}

	bna->mbox_mod.state = BNA_MBOX_FREE;
}
void
bna_mbox_mod_start(struct bna_mbox_mod *mbox_mod)
{
}

void
bna_mbox_mod_stop(struct bna_mbox_mod *mbox_mod)
{
	bna_mbox_flush_q(mbox_mod->bna, &mbox_mod->posted_q);
}

void
bna_mbox_mod_init(struct bna_mbox_mod *mbox_mod, struct bna *bna)
{
	bfa_nw_ioc_mbox_regisr(&bna->device.ioc, BFI_MC_LL, bna_ll_isr, bna);
	mbox_mod->state = BNA_MBOX_FREE;
	mbox_mod->msg_ctr = mbox_mod->msg_pending = 0;
	INIT_LIST_HEAD(&mbox_mod->posted_q);
	mbox_mod->bna = bna;
}

void
bna_mbox_mod_uninit(struct bna_mbox_mod *mbox_mod)
{
	mbox_mod->bna = NULL;
}
#define call_llport_stop_cbfn(llport, status)\
do {\
	if ((llport)->stop_cbfn)\
		(llport)->stop_cbfn(&(llport)->bna->port, status);\
	(llport)->stop_cbfn = NULL;\
} while (0)
static void bna_fw_llport_up(struct bna_llport *llport);
static void bna_fw_cb_llport_up(void *arg, int status);
static void bna_fw_llport_down(struct bna_llport *llport);
static void bna_fw_cb_llport_down(void *arg, int status);
static void bna_llport_start(struct bna_llport *llport);
static void bna_llport_stop(struct bna_llport *llport);
static void bna_llport_fail(struct bna_llport *llport);
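/*
 * LLPort state machine.  Each state below is declared with
 * bfa_fsm_state_decl(), which in the bfa framework expands (roughly) to
 * the prototypes of the state's entry function and event handler, e.g.
 * for the "stopped" state:
 *
 *	static void bna_llport_sm_stopped_entry(struct bna_llport *llport);
 *	static void bna_llport_sm_stopped(struct bna_llport *llport,
 *						enum bna_llport_event event);
 *
 * llport_sm_table maps those handlers back to enum bna_llport_state so
 * bna_llport_state_get() can report a numeric state.
 */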
enum bna_llport_state {
	BNA_LLPORT_STOPPED		= 1,
	BNA_LLPORT_DOWN			= 2,
	BNA_LLPORT_UP_RESP_WAIT		= 3,
	BNA_LLPORT_DOWN_RESP_WAIT	= 4,
	BNA_LLPORT_UP			= 5,
	BNA_LLPORT_LAST_RESP_WAIT	= 6
};

bfa_fsm_state_decl(bna_llport, stopped, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, down_resp_wait, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, up, struct bna_llport,
			enum bna_llport_event);
bfa_fsm_state_decl(bna_llport, last_resp_wait, struct bna_llport,
			enum bna_llport_event);

static struct bfa_sm_table llport_sm_table[] = {
	{BFA_SM(bna_llport_sm_stopped), BNA_LLPORT_STOPPED},
	{BFA_SM(bna_llport_sm_down), BNA_LLPORT_DOWN},
	{BFA_SM(bna_llport_sm_up_resp_wait), BNA_LLPORT_UP_RESP_WAIT},
	{BFA_SM(bna_llport_sm_down_resp_wait), BNA_LLPORT_DOWN_RESP_WAIT},
	{BFA_SM(bna_llport_sm_up), BNA_LLPORT_UP},
	{BFA_SM(bna_llport_sm_last_resp_wait), BNA_LLPORT_LAST_RESP_WAIT}
};
static void
bna_llport_sm_stopped_entry(struct bna_llport *llport)
{
	llport->bna->port.link_cbfn((llport)->bna->bnad, BNA_LINK_DOWN);
	call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
}
static void
bna_llport_sm_stopped(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_START:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	case LLPORT_E_STOP:
		call_llport_stop_cbfn(llport, BNA_CB_SUCCESS);
		break;

	case LLPORT_E_FAIL:
		break;

	case LLPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	case LLPORT_E_FWRESP_UP_OK:
	case LLPORT_E_FWRESP_DOWN:
		/*
		 * These events are received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_llport_sm_down_entry(struct bna_llport *llport)
{
	bnad_cb_port_link_status((llport)->bna->bnad, BNA_LINK_DOWN);
}
static void
bna_llport_sm_down(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_UP:
		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
		bna_fw_llport_up(llport);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
{
	BUG_ON(!llport_can_be_up(llport));
	/*
	 * NOTE: Do not call bna_fw_llport_up() here. That will over step
	 * mbox due to down_resp_wait -> up_resp_wait transition on event
	 * LLPORT_E_UP
	 */
}
static void
bna_llport_sm_up_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
		break;

	case LLPORT_E_FWRESP_UP_OK:
		bfa_fsm_set_state(llport, bna_llport_sm_up);
		break;

	case LLPORT_E_FWRESP_UP_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	case LLPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
		bna_fw_llport_up(llport);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_llport_sm_down_resp_wait_entry(struct bna_llport *llport)
{
	/*
	 * NOTE: Do not call bna_fw_llport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * LLPORT_E_DOWN
	 */
}
static void
bna_llport_sm_down_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_UP:
		bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
		break;

	case LLPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FWRESP_UP_FAIL:
	case LLPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_llport_sm_up_entry(struct bna_llport *llport)
{
}
static void
bna_llport_sm_up(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_STOP:
		bfa_fsm_set_state(llport, bna_llport_sm_last_resp_wait);
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
		bna_fw_llport_down(llport);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_llport_sm_last_resp_wait_entry(struct bna_llport *llport)
{
}
static void
bna_llport_sm_last_resp_wait(struct bna_llport *llport,
			enum bna_llport_event event)
{
	switch (event) {
	case LLPORT_E_FAIL:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	case LLPORT_E_DOWN:
		/*
		 * This event is received due to Rx objects stopping in
		 * parallel to llport
		 */
		/* No-op */
		break;

	case LLPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on LLPORT_E_STOP */
		bna_fw_llport_down(llport);
		break;

	case LLPORT_E_FWRESP_UP_FAIL:
	case LLPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(llport, bna_llport_sm_stopped);
		break;

	default:
		bfa_sm_fault(llport->bna, event);
	}
}
static void
bna_fw_llport_admin_up(struct bna_llport *llport)
{
	struct bfi_ll_port_admin_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.up = BNA_STATUS_T_ENABLED;

	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_llport_up, llport);

	bna_mbox_send(llport->bna, &llport->mbox_qe);
}
static void
bna_fw_llport_up(struct bna_llport *llport)
{
	if (llport->type == BNA_PORT_T_REGULAR)
		bna_fw_llport_admin_up(llport);
}
static void
bna_fw_cb_llport_up(void *arg, int status)
{
	struct bna_llport *llport = (struct bna_llport *)arg;

	bfa_q_qe_init(&llport->mbox_qe.qe);
	if (status == BFI_LL_CMD_FAIL) {
		if (llport->type == BNA_PORT_T_REGULAR)
			llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
		else
			llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
	} else
		bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
}
static void
bna_fw_llport_admin_down(struct bna_llport *llport)
{
	struct bfi_ll_port_admin_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_PORT_ADMIN_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.up = BNA_STATUS_T_DISABLED;

	bna_mbox_qe_fill(&llport->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_llport_down, llport);

	bna_mbox_send(llport->bna, &llport->mbox_qe);
}
static void
bna_fw_llport_down(struct bna_llport *llport)
{
	if (llport->type == BNA_PORT_T_REGULAR)
		bna_fw_llport_admin_down(llport);
}
static void
bna_fw_cb_llport_down(void *arg, int status)
{
	struct bna_llport *llport = (struct bna_llport *)arg;

	bfa_q_qe_init(&llport->mbox_qe.qe);
	bfa_fsm_send_event(llport, LLPORT_E_FWRESP_DOWN);
}
static void
bna_port_cb_llport_stopped(struct bna_port *port,
				enum bna_cb_status status)
{
	bfa_wc_down(&port->chld_stop_wc);
}
static void
bna_llport_init(struct bna_llport *llport, struct bna *bna)
{
	llport->flags |= BNA_LLPORT_F_ADMIN_UP;
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;

	llport->type = BNA_PORT_T_REGULAR;

	llport->link_status = BNA_LINK_DOWN;

	llport->rx_started_count = 0;

	llport->stop_cbfn = NULL;

	bfa_q_qe_init(&llport->mbox_qe.qe);

	llport->bna = bna;

	bfa_fsm_set_state(llport, bna_llport_sm_stopped);
}
static void
bna_llport_uninit(struct bna_llport *llport)
{
	llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
	llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;

	llport->bna = NULL;
}
static void
bna_llport_start(struct bna_llport *llport)
{
	bfa_fsm_send_event(llport, LLPORT_E_START);
}
static void
bna_llport_stop(struct bna_llport *llport)
{
	llport->stop_cbfn = bna_port_cb_llport_stopped;

	bfa_fsm_send_event(llport, LLPORT_E_STOP);
}
static void
bna_llport_fail(struct bna_llport *llport)
{
	/* Reset the physical port status to enabled */
	llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
	bfa_fsm_send_event(llport, LLPORT_E_FAIL);
}
static int
bna_llport_state_get(struct bna_llport *llport)
{
	return bfa_sm_to_state(llport_sm_table, llport->fsm);
}
void
bna_llport_rx_started(struct bna_llport *llport)
{
	llport->rx_started_count++;

	if (llport->rx_started_count == 1) {

		llport->flags |= BNA_LLPORT_F_RX_STARTED;

		if (llport_can_be_up(llport))
			bfa_fsm_send_event(llport, LLPORT_E_UP);
	}
}
void
bna_llport_rx_stopped(struct bna_llport *llport)
{
	int llport_up = llport_is_up(llport);

	llport->rx_started_count--;

	if (llport->rx_started_count == 0) {

		llport->flags &= ~BNA_LLPORT_F_RX_STARTED;

		if (llport_up)
			bfa_fsm_send_event(llport, LLPORT_E_DOWN);
	}
}
#define bna_port_chld_start(port)\
do {\
	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bna_llport_start(&(port)->llport);\
	bna_tx_mod_start(&(port)->bna->tx_mod, tx_type);\
	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_chld_stop(port)\
do {\
	enum bna_tx_type tx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bna_llport_stop(&(port)->llport);\
	bna_tx_mod_stop(&(port)->bna->tx_mod, tx_type);\
	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_chld_fail(port)\
do {\
	bna_llport_fail(&(port)->llport);\
	bna_tx_mod_fail(&(port)->bna->tx_mod);\
	bna_rx_mod_fail(&(port)->bna->rx_mod);\
} while (0)

#define bna_port_rx_start(port)\
do {\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bna_rx_mod_start(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define bna_port_rx_stop(port)\
do {\
	enum bna_rx_type rx_type = ((port)->type == BNA_PORT_T_REGULAR) ?\
					BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;\
	bfa_wc_up(&(port)->chld_stop_wc);\
	bna_rx_mod_stop(&(port)->bna->rx_mod, rx_type);\
} while (0)

#define call_port_stop_cbfn(port, status)\
do {\
	if ((port)->stop_cbfn)\
		(port)->stop_cbfn((port)->stop_cbarg, status);\
	(port)->stop_cbfn = NULL;\
	(port)->stop_cbarg = NULL;\
} while (0)

#define call_port_pause_cbfn(port, status)\
do {\
	if ((port)->pause_cbfn)\
		(port)->pause_cbfn((port)->bna->bnad, status);\
	(port)->pause_cbfn = NULL;\
} while (0)

#define call_port_mtu_cbfn(port, status)\
do {\
	if ((port)->mtu_cbfn)\
		(port)->mtu_cbfn((port)->bna->bnad, status);\
	(port)->mtu_cbfn = NULL;\
} while (0)
static void bna_fw_pause_set(struct bna_port *port);
static void bna_fw_cb_pause_set(void *arg, int status);
static void bna_fw_mtu_set(struct bna_port *port);
static void bna_fw_cb_mtu_set(void *arg, int status);
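/*
 * Port state machine overview (informal): bringing the port up walks
 * stopped -> mtu_init_wait -> pause_init_wait -> started, issuing one
 * firmware mailbox command per *_wait state (bna_fw_mtu_set() /
 * bna_fw_pause_set()) and advancing only on the matching PORT_E_FWRESP_*
 * event.  Run-time MTU changes go through rx_stop_wait / mtu_cfg_wait so
 * the Rx path is quiesced before the new MTU is programmed.
 */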
enum bna_port_event {
	PORT_E_START		= 1,
	PORT_E_STOP		= 2,
	PORT_E_FAIL		= 3,
	PORT_E_PAUSE_CFG	= 4,
	PORT_E_MTU_CFG		= 5,
	PORT_E_CHLD_STOPPED	= 6,
	PORT_E_FWRESP_PAUSE	= 7,
	PORT_E_FWRESP_MTU	= 8
};

enum bna_port_state {
	BNA_PORT_STOPPED		= 1,
	BNA_PORT_MTU_INIT_WAIT		= 2,
	BNA_PORT_PAUSE_INIT_WAIT	= 3,
	BNA_PORT_LAST_RESP_WAIT		= 4,
	BNA_PORT_STARTED		= 5,
	BNA_PORT_PAUSE_CFG_WAIT		= 6,
	BNA_PORT_RX_STOP_WAIT		= 7,
	BNA_PORT_MTU_CFG_WAIT		= 8,
	BNA_PORT_CHLD_STOP_WAIT		= 9
};

bfa_fsm_state_decl(bna_port, stopped, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_init_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_init_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, last_resp_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, started, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, pause_cfg_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, rx_stop_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, mtu_cfg_wait, struct bna_port,
			enum bna_port_event);
bfa_fsm_state_decl(bna_port, chld_stop_wait, struct bna_port,
			enum bna_port_event);

static struct bfa_sm_table port_sm_table[] = {
	{BFA_SM(bna_port_sm_stopped), BNA_PORT_STOPPED},
	{BFA_SM(bna_port_sm_mtu_init_wait), BNA_PORT_MTU_INIT_WAIT},
	{BFA_SM(bna_port_sm_pause_init_wait), BNA_PORT_PAUSE_INIT_WAIT},
	{BFA_SM(bna_port_sm_last_resp_wait), BNA_PORT_LAST_RESP_WAIT},
	{BFA_SM(bna_port_sm_started), BNA_PORT_STARTED},
	{BFA_SM(bna_port_sm_pause_cfg_wait), BNA_PORT_PAUSE_CFG_WAIT},
	{BFA_SM(bna_port_sm_rx_stop_wait), BNA_PORT_RX_STOP_WAIT},
	{BFA_SM(bna_port_sm_mtu_cfg_wait), BNA_PORT_MTU_CFG_WAIT},
	{BFA_SM(bna_port_sm_chld_stop_wait), BNA_PORT_CHLD_STOP_WAIT}
};
static void
bna_port_sm_stopped_entry(struct bna_port *port)
{
	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
	call_port_stop_cbfn(port, BNA_CB_SUCCESS);
}
static void
bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_START:
		bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
		break;

	case PORT_E_STOP:
		call_port_stop_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_FAIL:
		/* No-op */
		break;

	case PORT_E_PAUSE_CFG:
		call_port_pause_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_MTU_CFG:
		call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
		break;

	case PORT_E_CHLD_STOPPED:
		/*
		 * This event is received due to LLPort, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	case PORT_E_FWRESP_PAUSE:
	case PORT_E_FWRESP_MTU:
		/*
		 * These events are received due to flushing of mbox when
		 * device fails
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_mtu_init_wait_entry(struct bna_port *port)
{
	bna_fw_mtu_set(port);
}
static void
bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	case PORT_E_PAUSE_CFG:
		/* No-op */
		break;

	case PORT_E_MTU_CFG:
		port->flags |= BNA_PORT_F_MTU_CHANGED;
		break;

	case PORT_E_FWRESP_MTU:
		if (port->flags & BNA_PORT_F_MTU_CHANGED) {
			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
			bna_fw_mtu_set(port);
		} else {
			bfa_fsm_set_state(port, bna_port_sm_pause_init_wait);
		}
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_pause_init_wait_entry(struct bna_port *port)
{
	bna_fw_pause_set(port);
}
static void
bna_port_sm_pause_init_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_last_resp_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	case PORT_E_PAUSE_CFG:
		port->flags |= BNA_PORT_F_PAUSE_CHANGED;
		break;

	case PORT_E_MTU_CFG:
		port->flags |= BNA_PORT_F_MTU_CHANGED;
		break;

	case PORT_E_FWRESP_PAUSE:
		if (port->flags & BNA_PORT_F_PAUSE_CHANGED) {
			port->flags &= ~BNA_PORT_F_PAUSE_CHANGED;
			bna_fw_pause_set(port);
		} else if (port->flags & BNA_PORT_F_MTU_CHANGED) {
			port->flags &= ~BNA_PORT_F_MTU_CHANGED;
			bfa_fsm_set_state(port, bna_port_sm_mtu_init_wait);
		} else {
			bfa_fsm_set_state(port, bna_port_sm_started);
			bna_port_chld_start(port);
		}
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_last_resp_wait_entry(struct bna_port *port)
{
}
static void
bna_port_sm_last_resp_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
	case PORT_E_FWRESP_PAUSE:
	case PORT_E_FWRESP_MTU:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_started_entry(struct bna_port *port)
{
	/*
	 * NOTE: Do not call bna_port_chld_start() here, since it will be
	 * inadvertently called during pause_cfg_wait->started transition
	 * also
	 */
	call_port_pause_cbfn(port, BNA_CB_SUCCESS);
	call_port_mtu_cbfn(port, BNA_CB_SUCCESS);
}
static void
bna_port_sm_started(struct bna_port *port,
			enum bna_port_event event)
{
	switch (event) {
	case PORT_E_STOP:
		bfa_fsm_set_state(port, bna_port_sm_chld_stop_wait);
		break;

	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_PAUSE_CFG:
		bfa_fsm_set_state(port, bna_port_sm_pause_cfg_wait);
		break;

	case PORT_E_MTU_CFG:
		bfa_fsm_set_state(port, bna_port_sm_rx_stop_wait);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_pause_cfg_wait_entry(struct bna_port *port)
{
	bna_fw_pause_set(port);
}
static void
bna_port_sm_pause_cfg_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_FWRESP_PAUSE:
		bfa_fsm_set_state(port, bna_port_sm_started);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_rx_stop_wait_entry(struct bna_port *port)
{
	bna_port_rx_stop(port);
}
static void
bna_port_sm_rx_stop_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_CHLD_STOPPED:
		bfa_fsm_set_state(port, bna_port_sm_mtu_cfg_wait);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_mtu_cfg_wait_entry(struct bna_port *port)
{
	bna_fw_mtu_set(port);
}
static void
bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_FWRESP_MTU:
		bfa_fsm_set_state(port, bna_port_sm_started);
		bna_port_rx_start(port);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_port_sm_chld_stop_wait_entry(struct bna_port *port)
{
	bna_port_chld_stop(port);
}
static void
bna_port_sm_chld_stop_wait(struct bna_port *port,
				enum bna_port_event event)
{
	switch (event) {
	case PORT_E_FAIL:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		bna_port_chld_fail(port);
		break;

	case PORT_E_CHLD_STOPPED:
		bfa_fsm_set_state(port, bna_port_sm_stopped);
		break;

	default:
		bfa_sm_fault(port->bna, event);
	}
}
static void
bna_fw_pause_set(struct bna_port *port)
{
	struct bfi_ll_set_pause_req ll_req;

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_SET_PAUSE_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;

	ll_req.tx_pause = port->pause_config.tx_pause;
	ll_req.rx_pause = port->pause_config.rx_pause;

	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
			bna_fw_cb_pause_set, port);

	bna_mbox_send(port->bna, &port->mbox_qe);
}
static void
bna_fw_cb_pause_set(void *arg, int status)
{
	struct bna_port *port = (struct bna_port *)arg;

	bfa_q_qe_init(&port->mbox_qe.qe);
	bfa_fsm_send_event(port, PORT_E_FWRESP_PAUSE);
}
static void
bna_fw_mtu_set(struct bna_port *port)
{
	struct bfi_ll_mtu_info_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_MTU_INFO_REQ, 0);
	ll_req.mtu = htons((u16)port->mtu);

	bna_mbox_qe_fill(&port->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_mtu_set, port);
	bna_mbox_send(port->bna, &port->mbox_qe);
}
static void
bna_fw_cb_mtu_set(void *arg, int status)
{
	struct bna_port *port = (struct bna_port *)arg;

	bfa_q_qe_init(&port->mbox_qe.qe);
	bfa_fsm_send_event(port, PORT_E_FWRESP_MTU);
}
static void
bna_port_cb_chld_stopped(void *arg)
{
	struct bna_port *port = (struct bna_port *)arg;

	bfa_fsm_send_event(port, PORT_E_CHLD_STOPPED);
}
void
bna_port_init(struct bna_port *port, struct bna *bna)
{
	port->bna = bna;
	port->flags = 0;
	port->type = BNA_PORT_T_REGULAR;

	port->link_cbfn = bnad_cb_port_link_status;

	port->chld_stop_wc.wc_resume = bna_port_cb_chld_stopped;
	port->chld_stop_wc.wc_cbarg = port;
	port->chld_stop_wc.wc_count = 0;

	port->stop_cbfn = NULL;
	port->stop_cbarg = NULL;

	port->pause_cbfn = NULL;

	port->mtu_cbfn = NULL;

	bfa_q_qe_init(&port->mbox_qe.qe);

	bfa_fsm_set_state(port, bna_port_sm_stopped);

	bna_llport_init(&port->llport, bna);
}
void
bna_port_uninit(struct bna_port *port)
{
	bna_llport_uninit(&port->llport);

	port->flags = 0;

	port->bna = NULL;
}
int
bna_port_state_get(struct bna_port *port)
{
	return bfa_sm_to_state(port_sm_table, port->fsm);
}
void
bna_port_start(struct bna_port *port)
{
	port->flags |= BNA_PORT_F_DEVICE_READY;
	if (port->flags & BNA_PORT_F_ENABLED)
		bfa_fsm_send_event(port, PORT_E_START);
}
void
bna_port_stop(struct bna_port *port)
{
	port->stop_cbfn = bna_device_cb_port_stopped;
	port->stop_cbarg = &port->bna->device;

	port->flags &= ~BNA_PORT_F_DEVICE_READY;
	bfa_fsm_send_event(port, PORT_E_STOP);
}
void
bna_port_fail(struct bna_port *port)
{
	port->flags &= ~BNA_PORT_F_DEVICE_READY;
	bfa_fsm_send_event(port, PORT_E_FAIL);
}
void
bna_port_cb_tx_stopped(struct bna_port *port, enum bna_cb_status status)
{
	bfa_wc_down(&port->chld_stop_wc);
}

void
bna_port_cb_rx_stopped(struct bna_port *port, enum bna_cb_status status)
{
	bfa_wc_down(&port->chld_stop_wc);
}
int
bna_port_mtu_get(struct bna_port *port)
{
	return port->mtu;
}
void
bna_port_enable(struct bna_port *port)
{
	if (port->fsm != (bfa_sm_t)bna_port_sm_stopped)
		return;

	port->flags |= BNA_PORT_F_ENABLED;

	if (port->flags & BNA_PORT_F_DEVICE_READY)
		bfa_fsm_send_event(port, PORT_E_START);
}
void
bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
		 void (*cbfn)(void *, enum bna_cb_status))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(port->bna->bnad, BNA_CB_SUCCESS);
		return;
	}

	port->stop_cbfn = cbfn;
	port->stop_cbarg = port->bna->bnad;

	port->flags &= ~BNA_PORT_F_ENABLED;

	bfa_fsm_send_event(port, PORT_E_STOP);
}
void
bna_port_pause_config(struct bna_port *port,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *, enum bna_cb_status))
{
	port->pause_config = *pause_config;

	port->pause_cbfn = cbfn;

	bfa_fsm_send_event(port, PORT_E_PAUSE_CFG);
}
void
bna_port_mtu_set(struct bna_port *port, int mtu,
		 void (*cbfn)(struct bnad *, enum bna_cb_status))
{
	port->mtu = mtu;

	port->mtu_cbfn = cbfn;

	bfa_fsm_send_event(port, PORT_E_MTU_CFG);
}
void
bna_port_mac_get(struct bna_port *port, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&port->bna->device.ioc);
}
#define enable_mbox_intr(_device)\
do {\
	u32 intr_status;\
	bna_intr_status_get((_device)->bna, intr_status);\
	bnad_cb_device_enable_mbox_intr((_device)->bna->bnad);\
	bna_mbox_intr_enable((_device)->bna);\
} while (0)

#define disable_mbox_intr(_device)\
do {\
	bna_mbox_intr_disable((_device)->bna);\
	bnad_cb_device_disable_mbox_intr((_device)->bna->bnad);\
} while (0)
static const struct bna_chip_regs_offset reg_offset[] =
{{HOST_PAGE_NUM_FN0, HOSTFN0_INT_STATUS,
	HOSTFN0_INT_MASK, HOST_MSIX_ERR_INDEX_FN0},
{HOST_PAGE_NUM_FN1, HOSTFN1_INT_STATUS,
	HOSTFN1_INT_MASK, HOST_MSIX_ERR_INDEX_FN1},
{HOST_PAGE_NUM_FN2, HOSTFN2_INT_STATUS,
	HOSTFN2_INT_MASK, HOST_MSIX_ERR_INDEX_FN2},
{HOST_PAGE_NUM_FN3, HOSTFN3_INT_STATUS,
	HOSTFN3_INT_MASK, HOST_MSIX_ERR_INDEX_FN3},
};
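/*
 * Device state machine overview (informal): DEVICE_E_ENABLE kicks off IOC
 * enable and waits in ioc_ready_wait; once the IOC reports ready, the
 * device starts the mailbox module and the port.  Disable runs the
 * reverse order (stop port, stop mailbox, disable IOC), and any
 * DEVICE_E_IOC_FAILED event drops to the failed state, from which a
 * DEVICE_E_IOC_RESET re-enters ioc_ready_wait.
 */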
enum bna_device_event {
	DEVICE_E_ENABLE			= 1,
	DEVICE_E_DISABLE		= 2,
	DEVICE_E_IOC_READY		= 3,
	DEVICE_E_IOC_FAILED		= 4,
	DEVICE_E_IOC_DISABLED		= 5,
	DEVICE_E_IOC_RESET		= 6,
	DEVICE_E_PORT_STOPPED		= 7,
};

enum bna_device_state {
	BNA_DEVICE_STOPPED		= 1,
	BNA_DEVICE_IOC_READY_WAIT	= 2,
	BNA_DEVICE_READY		= 3,
	BNA_DEVICE_PORT_STOP_WAIT	= 4,
	BNA_DEVICE_IOC_DISABLE_WAIT	= 5,
	BNA_DEVICE_FAILED		= 6
};

bfa_fsm_state_decl(bna_device, stopped, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_ready_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ready, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, port_stop_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, ioc_disable_wait, struct bna_device,
			enum bna_device_event);
bfa_fsm_state_decl(bna_device, failed, struct bna_device,
			enum bna_device_event);

static struct bfa_sm_table device_sm_table[] = {
	{BFA_SM(bna_device_sm_stopped), BNA_DEVICE_STOPPED},
	{BFA_SM(bna_device_sm_ioc_ready_wait), BNA_DEVICE_IOC_READY_WAIT},
	{BFA_SM(bna_device_sm_ready), BNA_DEVICE_READY},
	{BFA_SM(bna_device_sm_port_stop_wait), BNA_DEVICE_PORT_STOP_WAIT},
	{BFA_SM(bna_device_sm_ioc_disable_wait), BNA_DEVICE_IOC_DISABLE_WAIT},
	{BFA_SM(bna_device_sm_failed), BNA_DEVICE_FAILED},
};
static void
bna_device_sm_stopped_entry(struct bna_device *device)
{
	if (device->stop_cbfn)
		device->stop_cbfn(device->stop_cbarg, BNA_CB_SUCCESS);

	device->stop_cbfn = NULL;
	device->stop_cbarg = NULL;
}
static void
bna_device_sm_stopped(struct bna_device *device,
			enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_ENABLE:
		if (device->intr_type == BNA_INTR_T_MSIX)
			bna_mbox_msix_idx_set(device);
		bfa_nw_ioc_enable(&device->ioc);
		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
		break;

	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_stopped);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
static void
bna_device_sm_ioc_ready_wait_entry(struct bna_device *device)
{
	/*
	 * Do not call bfa_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}
static void
bna_device_sm_ioc_ready_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		if (device->ready_cbfn)
			device->ready_cbfn(device->ready_cbarg,
						BNA_CB_INTERRUPT);
		device->ready_cbfn = NULL;
		device->ready_cbarg = NULL;
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_READY:
		bfa_fsm_set_state(device, bna_device_sm_ready);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
static void
bna_device_sm_ready_entry(struct bna_device *device)
{
	bna_mbox_mod_start(&device->bna->mbox_mod);
	bna_port_start(&device->bna->port);

	if (device->ready_cbfn)
		device->ready_cbfn(device->ready_cbarg,
					BNA_CB_SUCCESS);
	device->ready_cbfn = NULL;
	device->ready_cbarg = NULL;
}
static void
bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_port_stop_wait);
		break;

	case DEVICE_E_IOC_FAILED:
		bfa_fsm_set_state(device, bna_device_sm_failed);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
static void
bna_device_sm_port_stop_wait_entry(struct bna_device *device)
{
	bna_port_stop(&device->bna->port);
}
static void
bna_device_sm_port_stop_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_PORT_STOPPED:
		bna_mbox_mod_stop(&device->bna->mbox_mod);
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_FAILED:
		disable_mbox_intr(device);
		bna_port_fail(&device->bna->port);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
static void
bna_device_sm_ioc_disable_wait_entry(struct bna_device *device)
{
	bfa_nw_ioc_disable(&device->ioc);
}
static void
bna_device_sm_ioc_disable_wait(struct bna_device *device,
				enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_IOC_DISABLED:
		disable_mbox_intr(device);
		bfa_fsm_set_state(device, bna_device_sm_stopped);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
static void
bna_device_sm_failed_entry(struct bna_device *device)
{
	disable_mbox_intr(device);
	bna_port_fail(&device->bna->port);
	bna_mbox_mod_stop(&device->bna->mbox_mod);

	if (device->ready_cbfn)
		device->ready_cbfn(device->ready_cbarg,
					BNA_CB_FAIL);
	device->ready_cbfn = NULL;
	device->ready_cbarg = NULL;
}
static void
bna_device_sm_failed(struct bna_device *device,
			enum bna_device_event event)
{
	switch (event) {
	case DEVICE_E_DISABLE:
		bfa_fsm_set_state(device, bna_device_sm_ioc_disable_wait);
		break;

	case DEVICE_E_IOC_RESET:
		enable_mbox_intr(device);
		bfa_fsm_set_state(device, bna_device_sm_ioc_ready_wait);
		break;

	default:
		bfa_sm_fault(device->bna, event);
	}
}
/* IOC callback functions */

static void
bna_device_cb_iocll_ready(void *dev, enum bfa_status error)
{
	struct bna_device *device = (struct bna_device *)dev;

	if (error)
		bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
	else
		bfa_fsm_send_event(device, DEVICE_E_IOC_READY);
}
static void
bna_device_cb_iocll_disabled(void *dev)
{
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_DISABLED);
}
static void
bna_device_cb_iocll_failed(void *dev)
{
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_FAILED);
}
static void
bna_device_cb_iocll_reset(void *dev)
{
	struct bna_device *device = (struct bna_device *)dev;

	bfa_fsm_send_event(device, DEVICE_E_IOC_RESET);
}
static struct bfa_ioc_cbfn bfa_iocll_cbfn = {
	bna_device_cb_iocll_ready,
	bna_device_cb_iocll_disabled,
	bna_device_cb_iocll_failed,
	bna_device_cb_iocll_reset
};
static void
bna_adv_device_init(struct bna_device *device, struct bna *bna,
		struct bna_res_info *res_info)
{
	u8 *kva;
	u64 dma;

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;

	/*
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;

	bfa_nw_cee_attach(&bna->cee, &device->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();
}
void
bna_device_init(struct bna_device *device, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;

	device->bna = bna;

	/*
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&device->ioc, device, &bfa_iocll_cbfn);
	bfa_nw_ioc_pci_init(&device->ioc, &bna->pcidev, BFI_MC_LL);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	bfa_nw_ioc_mem_claim(&device->ioc,
		res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva,
		dma);

	bna_adv_device_init(device, bna, res_info);
	/*
	 * Initialize mbox_mod only after IOC, so that mbox handler
	 * registration goes through
	 */
	device->intr_type =
		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type;
	device->vector =
		res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.idl[0].vector;
	bna_mbox_mod_init(&bna->mbox_mod, bna);

	device->ready_cbfn = device->stop_cbfn = NULL;
	device->ready_cbarg = device->stop_cbarg = NULL;

	bfa_fsm_set_state(device, bna_device_sm_stopped);
}
void
bna_device_uninit(struct bna_device *device)
{
	bna_mbox_mod_uninit(&device->bna->mbox_mod);

	bfa_nw_ioc_detach(&device->ioc);

	device->bna = NULL;
}
static void
bna_device_cb_port_stopped(void *arg, enum bna_cb_status status)
{
	struct bna_device *device = (struct bna_device *)arg;

	bfa_fsm_send_event(device, DEVICE_E_PORT_STOPPED);
}
int
bna_device_status_get(struct bna_device *device)
{
	return device->fsm == (bfa_fsm_t)bna_device_sm_ready;
}
void
bna_device_enable(struct bna_device *device)
{
	if (device->fsm != (bfa_fsm_t)bna_device_sm_stopped) {
		bnad_cb_device_enabled(device->bna->bnad, BNA_CB_BUSY);
		return;
	}

	device->ready_cbfn = bnad_cb_device_enabled;
	device->ready_cbarg = device->bna->bnad;

	bfa_fsm_send_event(device, DEVICE_E_ENABLE);
}
void
bna_device_disable(struct bna_device *device, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_device_disabled(device->bna->bnad, BNA_CB_SUCCESS);
		return;
	}

	device->stop_cbfn = bnad_cb_device_disabled;
	device->stop_cbarg = device->bna->bnad;

	bfa_fsm_send_event(device, DEVICE_E_DISABLE);
}
int
bna_device_state_get(struct bna_device *device)
{
	return bfa_sm_to_state(device_sm_table, device->fsm);
}
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
static void
bna_adv_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				bfa_nw_cee_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(BFI_HW_STATS_SIZE, PAGE_SIZE);

	/* Virtual memory for soft stats */
	res_info[BNA_RES_MEM_T_SWSTATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.len =
		sizeof(struct bna_sw_stats);
}
static void
bna_sw_stats_get(struct bna *bna, struct bna_sw_stats *sw_stats)
{
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	struct list_head *txq_qe;
	struct list_head *rxp_qe;
	struct list_head *mac_qe;
	int i;

	sw_stats->device_state = bna_device_state_get(&bna->device);
	sw_stats->port_state = bna_port_state_get(&bna->port);
	sw_stats->port_flags = bna->port.flags;
	sw_stats->llport_state = bna_llport_state_get(&bna->port.llport);
	sw_stats->priority = bna->port.priority;

	i = 0;
	list_for_each(qe, &bna->tx_mod.tx_active_q) {
		tx = (struct bna_tx *)qe;
		sw_stats->tx_stats[i].tx_state = bna_tx_state_get(tx);
		sw_stats->tx_stats[i].tx_flags = tx->flags;

		sw_stats->tx_stats[i].num_txqs = 0;
		sw_stats->tx_stats[i].txq_bmap[0] = 0;
		sw_stats->tx_stats[i].txq_bmap[1] = 0;
		list_for_each(txq_qe, &tx->txq_q) {
			txq = (struct bna_txq *)txq_qe;
			if (txq->txq_id < 32)
				sw_stats->tx_stats[i].txq_bmap[0] |=
						((u32)1 << txq->txq_id);
			else
				sw_stats->tx_stats[i].txq_bmap[1] |=
						((u32)1 << (txq->txq_id - 32));
			sw_stats->tx_stats[i].num_txqs++;
		}

		sw_stats->tx_stats[i].txf_id = tx->txf.txf_id;

		i++;
	}
	sw_stats->num_active_tx = i;

	i = 0;
	list_for_each(qe, &bna->rx_mod.rx_active_q) {
		rx = (struct bna_rx *)qe;
		sw_stats->rx_stats[i].rx_state = bna_rx_state_get(rx);
		sw_stats->rx_stats[i].rx_flags = rx->rx_flags;

		sw_stats->rx_stats[i].num_rxps = 0;
		sw_stats->rx_stats[i].num_rxqs = 0;
		sw_stats->rx_stats[i].rxq_bmap[0] = 0;
		sw_stats->rx_stats[i].rxq_bmap[1] = 0;
		sw_stats->rx_stats[i].cq_bmap[0] = 0;
		sw_stats->rx_stats[i].cq_bmap[1] = 0;
		list_for_each(rxp_qe, &rx->rxp_q) {
			rxp = (struct bna_rxp *)rxp_qe;

			sw_stats->rx_stats[i].num_rxqs += 1;

			if (rxp->type == BNA_RXP_SINGLE) {
				if (rxp->rxq.single.only->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						(1 <<
						rxp->rxq.single.only->rxq_id);
				} else {
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						(1 <<
						(rxp->rxq.single.only->rxq_id - 32));
				}
			} else {
				if (rxp->rxq.slr.large->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						(1 <<
						rxp->rxq.slr.large->rxq_id);
				} else {
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						(1 <<
						(rxp->rxq.slr.large->rxq_id - 32));
				}

				if (rxp->rxq.slr.small->rxq_id < 32) {
					sw_stats->rx_stats[i].rxq_bmap[0] |=
						(1 <<
						rxp->rxq.slr.small->rxq_id);
				} else {
					sw_stats->rx_stats[i].rxq_bmap[1] |=
						(1 <<
						(rxp->rxq.slr.small->rxq_id - 32));
				}
				sw_stats->rx_stats[i].num_rxqs += 1;
			}

			if (rxp->cq.cq_id < 32)
				sw_stats->rx_stats[i].cq_bmap[0] |=
					(1 << rxp->cq.cq_id);
			else
				sw_stats->rx_stats[i].cq_bmap[1] |=
					(1 << (rxp->cq.cq_id - 32));

			sw_stats->rx_stats[i].num_rxps++;
		}

		sw_stats->rx_stats[i].rxf_id = rx->rxf.rxf_id;
		sw_stats->rx_stats[i].rxf_state = bna_rxf_state_get(&rx->rxf);
		sw_stats->rx_stats[i].rxf_oper_state = rx->rxf.rxf_oper_state;

		sw_stats->rx_stats[i].num_active_ucast = 0;
		if (rx->rxf.ucast_active_mac)
			sw_stats->rx_stats[i].num_active_ucast++;
		list_for_each(mac_qe, &rx->rxf.ucast_active_q)
			sw_stats->rx_stats[i].num_active_ucast++;

		sw_stats->rx_stats[i].num_active_mcast = 0;
		list_for_each(mac_qe, &rx->rxf.mcast_active_q)
			sw_stats->rx_stats[i].num_active_mcast++;

		sw_stats->rx_stats[i].rxmode_active = rx->rxf.rxmode_active;
		sw_stats->rx_stats[i].vlan_filter_status =
			rx->rxf.vlan_filter_status;
		memcpy(sw_stats->rx_stats[i].vlan_filter_table,
			rx->rxf.vlan_filter_table,
			sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32));

		sw_stats->rx_stats[i].rss_status = rx->rxf.rss_status;
		sw_stats->rx_stats[i].hds_status = rx->rxf.hds_status;

		i++;
	}
	sw_stats->num_active_rx = i;
}
static void
bna_fw_cb_stats_get(void *arg, int status)
{
	struct bna *bna = (struct bna *)arg;
	u64 *p_stats;
	int i, count;
	int rxf_count, txf_count;
	u64 rxf_bmap, txf_bmap;

	bfa_q_qe_init(&bna->mbox_qe.qe);

	if (status == 0) {
		p_stats = (u64 *)bna->stats.hw_stats;
		count = sizeof(struct bfi_ll_stats) / sizeof(u64);
		for (i = 0; i < count; i++)
			p_stats[i] = cpu_to_be64(p_stats[i]);

		rxf_count = 0;
		rxf_bmap = (u64)bna->stats.rxf_bmap[0] |
			((u64)bna->stats.rxf_bmap[1] << 32);
		for (i = 0; i < BFI_LL_RXF_ID_MAX; i++)
			if (rxf_bmap & ((u64)1 << i))
				rxf_count++;

		txf_count = 0;
		txf_bmap = (u64)bna->stats.txf_bmap[0] |
			((u64)bna->stats.txf_bmap[1] << 32);
		for (i = 0; i < BFI_LL_TXF_ID_MAX; i++)
			if (txf_bmap & ((u64)1 << i))
				txf_count++;

		p_stats = (u64 *)&bna->stats.hw_stats->rxf_stats[0] +
				((rxf_count * sizeof(struct bfi_ll_stats_rxf) +
				txf_count * sizeof(struct bfi_ll_stats_txf))/
				sizeof(u64));

		/* Populate the TXF stats from the firmware DMAed copy */
		for (i = (BFI_LL_TXF_ID_MAX - 1); i >= 0; i--)
			if (txf_bmap & ((u64)1 << i)) {
				p_stats -= sizeof(struct bfi_ll_stats_txf)/
						sizeof(u64);
				memcpy(&bna->stats.hw_stats->txf_stats[i],
					p_stats,
					sizeof(struct bfi_ll_stats_txf));
			}

		/* Populate the RXF stats from the firmware DMAed copy */
		for (i = (BFI_LL_RXF_ID_MAX - 1); i >= 0; i--)
			if (rxf_bmap & ((u64)1 << i)) {
				p_stats -= sizeof(struct bfi_ll_stats_rxf)/
						sizeof(u64);
				memcpy(&bna->stats.hw_stats->rxf_stats[i],
					p_stats,
					sizeof(struct bfi_ll_stats_rxf));
			}

		bna_sw_stats_get(bna, bna->stats.sw_stats);
		bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
	} else
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
}
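/*
 * Layout handled by bna_fw_cb_stats_get() (informal summary): the
 * firmware DMAs one struct bfi_ll_stats followed by a packed tail of
 * per-RXF and per-TXF blocks for the functions named in the request
 * bitmasks.  The callback first byte-swaps the whole buffer, then walks
 * that packed tail backwards (highest id first) and copies each block
 * into its fixed slot in hw_stats->rxf_stats[] / txf_stats[].
 */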
static void
bna_fw_stats_get(struct bna *bna)
{
	struct bfi_ll_stats_req ll_req;

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_GET_REQ, 0);
	ll_req.stats_mask = htons(BFI_LL_STATS_ALL);

	ll_req.rxf_id_mask[0] = htonl(bna->rx_mod.rxf_bmap[0]);
	ll_req.rxf_id_mask[1] = htonl(bna->rx_mod.rxf_bmap[1]);
	ll_req.txf_id_mask[0] = htonl(bna->tx_mod.txf_bmap[0]);
	ll_req.txf_id_mask[1] = htonl(bna->tx_mod.txf_bmap[1]);

	ll_req.host_buffer.a32.addr_hi = bna->hw_stats_dma.msb;
	ll_req.host_buffer.a32.addr_lo = bna->hw_stats_dma.lsb;

	bna_mbox_qe_fill(&bna->mbox_qe, &ll_req, sizeof(ll_req),
				bna_fw_cb_stats_get, bna);
	bna_mbox_send(bna, &bna->mbox_qe);

	bna->stats.rxf_bmap[0] = bna->rx_mod.rxf_bmap[0];
	bna->stats.rxf_bmap[1] = bna->rx_mod.rxf_bmap[1];
	bna->stats.txf_bmap[0] = bna->tx_mod.txf_bmap[0];
	bna->stats.txf_bmap[1] = bna->tx_mod.txf_bmap[1];
}
void
bna_stats_get(struct bna *bna)
{
	if (bna_device_status_get(&bna->device))
		bna_fw_stats_get(bna);
	else
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
}
void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->ib_config.coalescing_timeo = coalescing_timeo;

	if (ib->start_count)
		ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->ib_config.coalescing_timeo, 0);
}
static void
bna_rxf_adv_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config)
{
	switch (q_config->rxp_type) {
	case BNA_RXP_SINGLE:
		/* No-op */
		break;
	case BNA_RXP_SLR:
		rxf->ctrl_flags |= BNA_RXF_CF_SM_LG_RXQ;
		break;
	case BNA_RXP_HDS:
		rxf->hds_cfg.hdr_type = q_config->hds_config.hdr_type;
		rxf->hds_cfg.header_size =
				q_config->hds_config.header_size;
		rxf->forced_offset = 0;
		break;
	default:
		break;
	}

	if (q_config->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
		rxf->rss_cfg.hash_type = q_config->rss_config.hash_type;
		rxf->rss_cfg.hash_mask = q_config->rss_config.hash_mask;
		memcpy(&rxf->rss_cfg.toeplitz_hash_key[0],
			&q_config->rss_config.toeplitz_hash_key[0],
			sizeof(rxf->rss_cfg.toeplitz_hash_key));
	}
}
static void
rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
{
	struct bfi_ll_rxf_req req;

	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

	req.rxf_id = rxf->rxf_id;
	req.enable = status;

	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
				rxf_cb_cam_fltr_mbox_cmd, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
int
rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_ADD_REQ, mac);
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		return 1;
	}

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		return 1;
	}

	return 0;
}
int
rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;

		/* Disable VLAN filter to allow all VLANs */
		__rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
int
rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;

		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;

		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
int
rxf_clear_packet_filter_ucast(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* 1. delete pending ucast entries */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
		return 1;
	}

	/* 2. clear active ucast entries; move them to pending_add_q */
	if (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_DEL_REQ, mac);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
		return 1;
	}

	return 0;
}
int
rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 6. Execute pending promisc mode disable command */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	/* 7. Clear active promisc mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* move promisc configuration from active -> pending */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;

		/* Revert VLAN filter */
		__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_PROMISCUOUS_SET_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
int
rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* 10. Execute pending allmulti mode disable command */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	/* 11. Clear active allmulti mode; move it to pending enable */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* move allmulti configuration from active -> pending */
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_FILTER_REQ,
				BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
void
rxf_reset_packet_filter_ucast(struct bna_rxf *rxf)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* 1. Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
	}

	/* 2. Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
	}
}
void
rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* 6. Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->rxf_promisc_id = BFI_MAX_RXF;
	}

	/* 7. Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
	}
}
void
rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
{
	/* 10. Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}

	/* 11. Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
	}
}
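/*
 * Promisc/allmulti bookkeeping (informal summary): each mode keeps a
 * "pending" bit (change requested but not yet programmed) and an "active"
 * bit in rxf->rxmode_active.  The rxf_*_enable()/rxf_*_disable() helpers
 * below only flip the pending bits and report whether hardware needs to
 * be touched; the process/clear/reset routines above consume the pending
 * bits and issue the actual mailbox commands.
 */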
/**
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *  Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There can not be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
			(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Schedule enable */
	} else {
		/* Promisc mode should not be active in the system */
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		bna->rxf_promisc_id = rxf->rxf_id;
		ret = 1;
	}

	return ret;
}
/**
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 *  Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	/* There can not be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* Promisc mode should not be active */
		/* system promisc state should be pending */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		/* Remove the promisc state from the system */
		bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Promisc mode should be active in the system */
		promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}
	/* Do nothing if already disabled */

	return ret;
}
/**
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	/* There can not be any pending disable command */

	/* Do nothing if pending enable or already enabled */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
			(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Schedule enable */
	} else {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
/**
 * Should only be called by bna_rxf_mode_set.
 * Helps deciding if h/w configuration is needed or not.
 * Returns:
 *	0 = no h/w change
 *	1 = need h/w change
 */
static int
rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	/* There can not be any pending disable */

	/* Turn off pending enable command, if any */
	if (is_allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* Allmulti mode should not be active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);

	/* Schedule disable */
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
			(rx->bna->rxf_promisc_id != rxf->rxf_id))
			goto err_return;
		if (rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
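
/*
 * Usage sketch, assuming a bnad-level caller that holds the appropriate lock
 * and supplies a completion callback with the signature expected by
 * bna_rx_mode_set(); the example_* names are illustrative, not part of the
 * driver.
 */
static void
example_cam_fltr_done(struct bnad *bnad, struct bna_rx *rx,
			enum bna_cb_status status)
{
	/* CAM filter reconfiguration finished (or was not needed) */
}

static void
example_enable_promisc(struct bna_rx *rx)
{
	/* Request promiscuous mode; allmulti bits are left untouched */
	bna_rx_mode_set(rx, BNA_RXMODE_PROMISC, BNA_RXMODE_PROMISC,
			example_cam_fltr_done);
}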
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}
void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(rxp->cq.ib, coalescing_timeo);
	}
}
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(ccb->cq->ib, coalescing_timeo);
}
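
/*
 * Sketch, with made-up timeout values, of seeding the DIM vector that
 * bna_rx_dim_update() above consults: one coalescing timeout per
 * (load, bias) pair. The real defaults live elsewhere in the driver; the
 * helper name and the numbers here are illustrative only.
 */
static void
example_seed_dim_vector(struct bna *bna)
{
	/* Indexed [load][bias]; unlisted entries default to 0 */
	static const u32 dim[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
		[BNA_LOAD_T_HIGH_4] = {12, 12},	/* heavy load: coalesce hard */
		[BNA_LOAD_T_LOW_4] = {1, 2},	/* light load: stay responsive */
	};

	bna_rx_dim_reconfig(bna, dim);
}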
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(txq->ib, coalescing_timeo);
	}
}
struct bna_ritseg_pool_cfg {
	u32	pool_size;
	u32	pool_entry_size;
};
init_ritseg_pool(ritseg_pool_cfg);
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < BFI_MAX_UCMAC; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}
static void
bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;
	int i = 0;

	/* Sanity walk of the free list */
	list_for_each(qe, &ucam_mod->free_q)
		i++;

	ucam_mod->bna = NULL;
}
static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
		res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < BFI_MAX_MCMAC; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->bna = bna;
}
static void
bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;
	int i = 0;

	/* Sanity walk of the free list */
	list_for_each(qe, &mcam_mod->free_q)
		i++;

	mcam_mod->bna = NULL;
}
static void
bna_rit_mod_init(struct bna_rit_mod *rit_mod,
		struct bna_res_info *res_info)
{
	int i;
	int j;
	int count;
	int offset;

	rit_mod->rit = (struct bna_rit_entry *)
		res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mdl[0].kva;
	rit_mod->rit_segment = (struct bna_rit_segment *)
		res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mdl[0].kva;

	count = 0;
	offset = 0;
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		INIT_LIST_HEAD(&rit_mod->rit_seg_pool[i]);
		for (j = 0; j < ritseg_pool_cfg[i].pool_size; j++) {
			bfa_q_qe_init(&rit_mod->rit_segment[count].qe);
			rit_mod->rit_segment[count].max_rit_size =
					ritseg_pool_cfg[i].pool_entry_size;
			rit_mod->rit_segment[count].rit_offset = offset;
			rit_mod->rit_segment[count].rit =
					&rit_mod->rit[offset];
			list_add_tail(&rit_mod->rit_segment[count].qe,
				&rit_mod->rit_seg_pool[i]);
			count++;
			offset += ritseg_pool_cfg[i].pool_entry_size;
		}
	}
}
/* Called during probe(), before calling bna_init() */
void
bna_res_req(struct bna_res_info *res_info)
{
	bna_adv_res_req(res_info);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* DMA memory for index segment of an IB */
	res_info[BNA_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.len =
				BFI_IBIDX_SIZE * BFI_IBIDX_MAX_SEGSIZE;
	res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.num = BFI_MAX_IB;

	/* Virtual memory for IB objects - stored by IB module */
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.len =
		BFI_MAX_IB * sizeof(struct bna_ib);

	/* Virtual memory for intr objects - stored by IB module */
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.len =
		BFI_MAX_IB * sizeof(struct bna_intr);

	/* Virtual memory for idx_seg objects - stored by IB module */
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.len =
		BFI_IBIDX_TOTAL_SEGS * sizeof(struct bna_ibidx_seg);

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		BFI_MAX_TXQ * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		BFI_MAX_TXQ * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		BFI_MAX_RXQ * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		BFI_MAX_RXQ * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		BFI_MAX_RXQ * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		BFI_MAX_UCMAC * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		BFI_MAX_MCMAC * sizeof(struct bna_mac);

	/* Virtual memory for RIT entries */
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RIT_ENTRY].res_u.mem_info.len =
		BFI_MAX_RIT_SIZE * sizeof(struct bna_rit_entry);

	/* Virtual memory for RIT segment table */
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_RIT_SEGMENT].res_u.mem_info.len =
		BFI_RIT_TOTAL_SEGS * sizeof(struct bna_rit_segment);

	/* Interrupt resource for mailbox interrupt */
	res_info[BNA_RES_INTR_T_MBOX].res_type = BNA_RES_T_INTR;
	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.intr_type =
		BNA_INTR_T_MSIX;
	res_info[BNA_RES_INTR_T_MBOX].res_u.intr_info.num = 1;
}
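
/*
 * Probe-time sketch, assuming a bnad-style caller: bna_res_req() only
 * records the resource requirements; the caller must then allocate each
 * res_info[] entry (DMA memory, kernel virtual memory, interrupts) before
 * passing the same array to bna_init(). The wrapper and the allocation step
 * named below are hypothetical.
 */
static int
example_gather_requirements(struct bna_res_info *res_info)
{
	/* Ask the BNA layer what it needs */
	bna_res_req(res_info);

	/* Caller allocates every requested resource here (not shown) */
	return 0;
}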
/* Called during probe() */
void
bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
		struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats = (struct bfi_ll_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;
	bna->stats.sw_stats = (struct bna_sw_stats *)
		res_info[BNA_RES_MEM_T_SWSTATS].res_u.mem_info.mdl[0].kva;

	bna->regs.page_addr = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].page_addr;
	bna->regs.fn_int_status = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].fn_int_status;
	bna->regs.fn_int_mask = bna->pcidev.pci_bar_kva +
				reg_offset[bna->pcidev.pci_func].fn_int_mask;

	if (bna->pcidev.pci_func < 3)
		bna->port_num = 0;
	else
		bna->port_num = 1;

	/* Also initializes diag, cee, sfp, phy_port and mbox_mod */
	bna_device_init(&bna->device, bna, res_info);

	bna_port_init(&bna->port, bna);

	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ib_mod_init(&bna->ib_mod, bna, res_info);

	bna_rit_mod_init(&bna->rit_mod, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->rxf_promisc_id = BFI_MAX_RXF;

	/* Mbox q element for posting stat request to f/w */
	bfa_q_qe_init(&bna->mbox_qe.qe);
}
void
bna_uninit(struct bna *bna)
{
	bna_mcam_mod_uninit(&bna->mcam_mod);

	bna_ucam_mod_uninit(&bna->ucam_mod);

	bna_ib_mod_uninit(&bna->ib_mod);

	bna_rx_mod_uninit(&bna->rx_mod);

	bna_tx_mod_uninit(&bna->tx_mod);

	bna_port_uninit(&bna->port);

	bna_device_uninit(&bna->device);

	bna->bnad = NULL;
}
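
/*
 * Lifecycle sketch, assuming the usual probe()/remove() pairing: bna_init()
 * consumes the resources allocated against bna_res_req(), and bna_uninit()
 * tears the modules down again. The example_* wrappers are illustrative.
 */
static void
example_attach(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	/* res_info[] must already be populated per bna_res_req() */
	bna_init(bna, bnad, pcidev, res_info);
}

static void
example_detach(struct bna *bna)
{
	/* Reverse of example_attach() */
	bna_uninit(bna);
}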
struct bna_mac *
bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
{
	struct list_head *qe;

	if (list_empty(&ucam_mod->free_q))
		return NULL;

	bfa_q_deq(&ucam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}
struct bna_mac *
bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_q, &qe);

	return (struct bna_mac *)qe;
}

void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}
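
/*
 * Sketch of the CAM MAC free-list protocol above, assuming the caller holds
 * the appropriate lock and that struct bna_mac carries the address in its
 * addr[] field: entries are borrowed with *_mac_get() and must be returned
 * with *_mac_put() once the address is no longer needed. The example_*
 * helper is illustrative.
 */
static int
example_borrow_mcast_entry(struct bna_mcam_mod *mcam_mod, const u8 *addr)
{
	struct bna_mac *mac = bna_mcam_mod_mac_get(mcam_mod);

	if (mac == NULL)
		return -1;	/* free list exhausted (all BFI_MAX_MCMAC in use) */

	memcpy(mac->addr, addr, sizeof(mac->addr));

	/* ... queue the entry for addition to an RxF here ... */

	bna_mcam_mod_mac_put(mcam_mod, mac);	/* return it when done */
	return 0;
}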
/**
 * Note: This should be called in the same locking context as the call to
 * bna_rit_mod_seg_get()
 */
int
bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size)
{
	int i;

	/* Select the pool for seg_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	if (i == BFI_RIT_SEG_TOTAL_POOLS)
		return 0;

	if (list_empty(&rit_mod->rit_seg_pool[i]))
		return 0;

	return 1;
}
struct bna_rit_segment *
bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size)
{
	struct bna_rit_segment *seg;
	struct list_head *qe;
	int i;

	/* Select the pool for seg_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg_size <= ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	if (i == BFI_RIT_SEG_TOTAL_POOLS)
		return NULL;

	if (list_empty(&rit_mod->rit_seg_pool[i]))
		return NULL;

	bfa_q_deq(&rit_mod->rit_seg_pool[i], &qe);
	seg = (struct bna_rit_segment *)qe;
	bfa_q_qe_init(&seg->qe);
	seg->rit_size = seg_size;

	return seg;
}
void
bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
			struct bna_rit_segment *seg)
{
	int i;

	/* Select the pool for seg->max_rit_size */
	for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
		if (seg->max_rit_size == ritseg_pool_cfg[i].pool_entry_size)
			break;
	}

	list_add_tail(&seg->qe, &rit_mod->rit_seg_pool[i]);
}
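
/*
 * Sketch of the RIT segment pool protocol above, assuming the caller holds
 * the same lock across the capacity check and the allocation (as the note
 * before bna_rit_mod_can_satisfy() requires). The wrapper name is
 * illustrative.
 */
static struct bna_rit_segment *
example_rit_seg_alloc(struct bna_rit_mod *rit_mod, int seg_size)
{
	struct bna_rit_segment *seg;

	/* Optional pre-check; seg_get() repeats the same pool selection */
	if (!bna_rit_mod_can_satisfy(rit_mod, seg_size))
		return NULL;

	seg = bna_rit_mod_seg_get(rit_mod, seg_size);

	/* ... fill seg->rit[0..seg_size - 1], program seg->rit_offset ... */

	return seg;	/* released later with bna_rit_mod_seg_put() */
}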