/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 */
#include "bna.h"
#include "bfi.h"
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}
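
/*
 * RXF: per-Rx receive-function (filter) state. The two soft-reset macros
 * below re-arm the VLAN filter and RSS configuration so that they are
 * replayed to the firmware the next time the configuration is applied.
 */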
#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
					BNA_RSS_F_CFG_PENDING |		\
					BNA_RSS_F_STATUS_PENDING);	\
} while (0)
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
					enum bna_cleanup_type cleanup);
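
/*
 * RXF state machine. bfa_fsm_state_decl() only declares the per-state
 * handler and entry-function prototypes (bna_rxf_sm_<state>() and
 * bna_rxf_sm_<state>_entry()); bfa_fsm_set_state() switches state and runs
 * the entry function, and bfa_fsm_send_event() dispatches an event to the
 * handler of the current state.
 */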
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_pause_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		call_rxf_resume_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
	call_rxf_pause_cbfn(rxf);
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}

static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		call_rxf_resume_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		/* No-op */
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_start_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
	call_rxf_resume_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		if (!bna_rxf_fltr_clear(rxf))
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		else
			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_pause_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
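
/*
 * The bna_bfi_*() helpers below build a BFI enet command in
 * rxf->bfi_enet_cmd and post it to the firmware message queue; the
 * firmware's reply comes back through bna_bfi_rxf_cfg_rsp() /
 * bna_bfi_rxf_mcast_add_rsp(), which feed RXF_E_FW_RESP into the state
 * machine so the next pending configuration step can be issued.
 */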
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
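
/*
 * Multicast CAM entries are reference counted through bna_mcam_handle:
 * several bna_mac entries that resolve to the same firmware handle share
 * one handle object, and the hardware entry is only deleted when the last
 * reference is dropped (see bna_rxf_mcast_del() below).
 */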
/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}
static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}
static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}
static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* Delete multicast entries previously added */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		bfa_q_qe_init(qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		bfa_q_qe_init(qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}
static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}
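
/*
 * bna_rxf_cfg_apply() below issues at most one firmware command per call;
 * each helper returns 1 when it posted a command (the state machine then
 * waits for RXF_E_FW_RESP before applying the next piece) and 0 when that
 * part of the configuration is already up to date.
 */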
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}
/* Only software reset */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	return 0;
}
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}
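
/*
 * The RSS indirection table (RIT) is simply the list of CQ ids of this
 * Rx's paths; bna_bfi_rit_cfg() sends it to the firmware when
 * BNA_RSS_F_RIT_PENDING is set.
 */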
static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}
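
/*
 * Firmware response handlers: translate message-queue replies into
 * RXF_E_FW_RESP events for the RXF state machine.
 */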
void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		(struct bfi_enet_mcast_add_rsp *)msghdr;

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
			ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
732 bna_rxf_init(struct bna_rxf
*rxf
,
734 struct bna_rx_config
*q_config
,
735 struct bna_res_info
*res_info
)
739 INIT_LIST_HEAD(&rxf
->ucast_pending_add_q
);
740 INIT_LIST_HEAD(&rxf
->ucast_pending_del_q
);
741 rxf
->ucast_pending_set
= 0;
742 rxf
->ucast_active_set
= 0;
743 INIT_LIST_HEAD(&rxf
->ucast_active_q
);
744 rxf
->ucast_pending_mac
= NULL
;
746 INIT_LIST_HEAD(&rxf
->mcast_pending_add_q
);
747 INIT_LIST_HEAD(&rxf
->mcast_pending_del_q
);
748 INIT_LIST_HEAD(&rxf
->mcast_active_q
);
749 INIT_LIST_HEAD(&rxf
->mcast_handle_q
);
751 if (q_config
->paused
)
752 rxf
->flags
|= BNA_RXF_F_PAUSED
;
755 res_info
[BNA_RX_RES_MEM_T_RIT
].res_u
.mem_info
.mdl
[0].kva
;
756 bna_rit_init(rxf
, q_config
->num_paths
);
758 rxf
->rss_status
= q_config
->rss_status
;
759 if (rxf
->rss_status
== BNA_STATUS_T_ENABLED
) {
760 rxf
->rss_cfg
= q_config
->rss_config
;
761 rxf
->rss_pending
|= BNA_RSS_F_CFG_PENDING
;
762 rxf
->rss_pending
|= BNA_RSS_F_RIT_PENDING
;
763 rxf
->rss_pending
|= BNA_RSS_F_STATUS_PENDING
;
766 rxf
->vlan_filter_status
= BNA_STATUS_T_DISABLED
;
767 memset(rxf
->vlan_filter_table
, 0,
768 (sizeof(u32
) * (BFI_ENET_VLAN_ID_MAX
/ 32)));
769 rxf
->vlan_filter_table
[0] |= 1; /* for pure priority tagged frames */
770 rxf
->vlan_pending_bitmask
= (u8
)BFI_VLAN_BMASK_ALL
;
772 rxf
->vlan_strip_status
= q_config
->vlan_strip_status
;
774 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
778 bna_rxf_uninit(struct bna_rxf
*rxf
)
782 rxf
->ucast_pending_set
= 0;
783 rxf
->ucast_active_set
= 0;
785 while (!list_empty(&rxf
->ucast_pending_add_q
)) {
786 bfa_q_deq(&rxf
->ucast_pending_add_q
, &mac
);
787 bfa_q_qe_init(&mac
->qe
);
788 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
791 if (rxf
->ucast_pending_mac
) {
792 bfa_q_qe_init(&rxf
->ucast_pending_mac
->qe
);
793 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
,
794 rxf
->ucast_pending_mac
);
795 rxf
->ucast_pending_mac
= NULL
;
798 while (!list_empty(&rxf
->mcast_pending_add_q
)) {
799 bfa_q_deq(&rxf
->mcast_pending_add_q
, &mac
);
800 bfa_q_qe_init(&mac
->qe
);
801 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
804 rxf
->rxmode_pending
= 0;
805 rxf
->rxmode_pending_bitmask
= 0;
806 if (rxf
->rx
->bna
->promisc_rid
== rxf
->rx
->rid
)
807 rxf
->rx
->bna
->promisc_rid
= BFI_INVALID_RID
;
808 if (rxf
->rx
->bna
->default_mode_rid
== rxf
->rx
->rid
)
809 rxf
->rx
->bna
->default_mode_rid
= BFI_INVALID_RID
;
811 rxf
->rss_pending
= 0;
812 rxf
->vlan_strip_pending
= false;
820 bna_rx_cb_rxf_started(struct bna_rx
*rx
)
822 bfa_fsm_send_event(rx
, RX_E_RXF_STARTED
);
826 bna_rxf_start(struct bna_rxf
*rxf
)
828 rxf
->start_cbfn
= bna_rx_cb_rxf_started
;
829 rxf
->start_cbarg
= rxf
->rx
;
830 bfa_fsm_send_event(rxf
, RXF_E_START
);
834 bna_rx_cb_rxf_stopped(struct bna_rx
*rx
)
836 bfa_fsm_send_event(rx
, RX_E_RXF_STOPPED
);
840 bna_rxf_stop(struct bna_rxf
*rxf
)
842 rxf
->stop_cbfn
= bna_rx_cb_rxf_stopped
;
843 rxf
->stop_cbarg
= rxf
->rx
;
844 bfa_fsm_send_event(rxf
, RXF_E_STOP
);
848 bna_rxf_fail(struct bna_rxf
*rxf
)
850 bfa_fsm_send_event(rxf
, RXF_E_FAIL
);
854 bna_rx_ucast_set(struct bna_rx
*rx
, u8
*ucmac
,
855 void (*cbfn
)(struct bnad
*, struct bna_rx
*))
857 struct bna_rxf
*rxf
= &rx
->rxf
;
859 if (rxf
->ucast_pending_mac
== NULL
) {
860 rxf
->ucast_pending_mac
=
861 bna_ucam_mod_mac_get(&rxf
->rx
->bna
->ucam_mod
);
862 if (rxf
->ucast_pending_mac
== NULL
)
863 return BNA_CB_UCAST_CAM_FULL
;
864 bfa_q_qe_init(&rxf
->ucast_pending_mac
->qe
);
867 memcpy(rxf
->ucast_pending_mac
->addr
, ucmac
, ETH_ALEN
);
868 rxf
->ucast_pending_set
= 1;
869 rxf
->cam_fltr_cbfn
= cbfn
;
870 rxf
->cam_fltr_cbarg
= rx
->bna
->bnad
;
872 bfa_fsm_send_event(rxf
, RXF_E_CONFIG
);
874 return BNA_CB_SUCCESS
;
878 bna_rx_mcast_add(struct bna_rx
*rx
, u8
*addr
,
879 void (*cbfn
)(struct bnad
*, struct bna_rx
*))
881 struct bna_rxf
*rxf
= &rx
->rxf
;
884 /* Check if already added or pending addition */
885 if (bna_mac_find(&rxf
->mcast_active_q
, addr
) ||
886 bna_mac_find(&rxf
->mcast_pending_add_q
, addr
)) {
888 cbfn(rx
->bna
->bnad
, rx
);
889 return BNA_CB_SUCCESS
;
892 mac
= bna_mcam_mod_mac_get(&rxf
->rx
->bna
->mcam_mod
);
894 return BNA_CB_MCAST_LIST_FULL
;
895 bfa_q_qe_init(&mac
->qe
);
896 memcpy(mac
->addr
, addr
, ETH_ALEN
);
897 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_add_q
);
899 rxf
->cam_fltr_cbfn
= cbfn
;
900 rxf
->cam_fltr_cbarg
= rx
->bna
->bnad
;
902 bfa_fsm_send_event(rxf
, RXF_E_CONFIG
);
904 return BNA_CB_SUCCESS
;
908 bna_rx_mcast_listset(struct bna_rx
*rx
, int count
, u8
*mclist
,
909 void (*cbfn
)(struct bnad
*, struct bna_rx
*))
911 struct bna_rxf
*rxf
= &rx
->rxf
;
912 struct list_head list_head
;
913 struct list_head
*qe
;
919 INIT_LIST_HEAD(&list_head
);
920 for (i
= 0, mcaddr
= mclist
; i
< count
; i
++) {
921 mac
= bna_mcam_mod_mac_get(&rxf
->rx
->bna
->mcam_mod
);
924 bfa_q_qe_init(&mac
->qe
);
925 memcpy(mac
->addr
, mcaddr
, ETH_ALEN
);
926 list_add_tail(&mac
->qe
, &list_head
);
931 /* Purge the pending_add_q */
932 while (!list_empty(&rxf
->mcast_pending_add_q
)) {
933 bfa_q_deq(&rxf
->mcast_pending_add_q
, &qe
);
935 mac
= (struct bna_mac
*)qe
;
936 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
939 /* Schedule active_q entries for deletion */
940 while (!list_empty(&rxf
->mcast_active_q
)) {
941 bfa_q_deq(&rxf
->mcast_active_q
, &qe
);
942 mac
= (struct bna_mac
*)qe
;
943 bfa_q_qe_init(&mac
->qe
);
944 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_del_q
);
947 /* Add the new entries */
948 while (!list_empty(&list_head
)) {
949 bfa_q_deq(&list_head
, &qe
);
950 mac
= (struct bna_mac
*)qe
;
951 bfa_q_qe_init(&mac
->qe
);
952 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_add_q
);
955 rxf
->cam_fltr_cbfn
= cbfn
;
956 rxf
->cam_fltr_cbarg
= rx
->bna
->bnad
;
957 bfa_fsm_send_event(rxf
, RXF_E_CONFIG
);
959 return BNA_CB_SUCCESS
;
962 while (!list_empty(&list_head
)) {
963 bfa_q_deq(&list_head
, &qe
);
964 mac
= (struct bna_mac
*)qe
;
965 bfa_q_qe_init(&mac
->qe
);
966 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
969 return BNA_CB_MCAST_LIST_FULL
;
973 bna_rx_vlan_add(struct bna_rx
*rx
, int vlan_id
)
975 struct bna_rxf
*rxf
= &rx
->rxf
;
976 int index
= (vlan_id
>> BFI_VLAN_WORD_SHIFT
);
977 int bit
= (1 << (vlan_id
& BFI_VLAN_WORD_MASK
));
978 int group_id
= (vlan_id
>> BFI_VLAN_BLOCK_SHIFT
);
980 rxf
->vlan_filter_table
[index
] |= bit
;
981 if (rxf
->vlan_filter_status
== BNA_STATUS_T_ENABLED
) {
982 rxf
->vlan_pending_bitmask
|= (1 << group_id
);
983 bfa_fsm_send_event(rxf
, RXF_E_CONFIG
);
988 bna_rx_vlan_del(struct bna_rx
*rx
, int vlan_id
)
990 struct bna_rxf
*rxf
= &rx
->rxf
;
991 int index
= (vlan_id
>> BFI_VLAN_WORD_SHIFT
);
992 int bit
= (1 << (vlan_id
& BFI_VLAN_WORD_MASK
));
993 int group_id
= (vlan_id
>> BFI_VLAN_BLOCK_SHIFT
);
995 rxf
->vlan_filter_table
[index
] &= ~bit
;
996 if (rxf
->vlan_filter_status
== BNA_STATUS_T_ENABLED
) {
997 rxf
->vlan_pending_bitmask
|= (1 << group_id
);
998 bfa_fsm_send_event(rxf
, RXF_E_CONFIG
);
1003 bna_rxf_ucast_cfg_apply(struct bna_rxf
*rxf
)
1005 struct bna_mac
*mac
= NULL
;
1006 struct list_head
*qe
;
1008 /* Delete MAC addresses previousely added */
1009 if (!list_empty(&rxf
->ucast_pending_del_q
)) {
1010 bfa_q_deq(&rxf
->ucast_pending_del_q
, &qe
);
1012 mac
= (struct bna_mac
*)qe
;
1013 bna_bfi_ucast_req(rxf
, mac
, BFI_ENET_H2I_MAC_UCAST_DEL_REQ
);
1014 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
1018 /* Set default unicast MAC */
1019 if (rxf
->ucast_pending_set
) {
1020 rxf
->ucast_pending_set
= 0;
1021 memcpy(rxf
->ucast_active_mac
.addr
,
1022 rxf
->ucast_pending_mac
->addr
, ETH_ALEN
);
1023 rxf
->ucast_active_set
= 1;
1024 bna_bfi_ucast_req(rxf
, &rxf
->ucast_active_mac
,
1025 BFI_ENET_H2I_MAC_UCAST_SET_REQ
);
1029 /* Add additional MAC entries */
1030 if (!list_empty(&rxf
->ucast_pending_add_q
)) {
1031 bfa_q_deq(&rxf
->ucast_pending_add_q
, &qe
);
1033 mac
= (struct bna_mac
*)qe
;
1034 list_add_tail(&mac
->qe
, &rxf
->ucast_active_q
);
1035 bna_bfi_ucast_req(rxf
, mac
, BFI_ENET_H2I_MAC_UCAST_ADD_REQ
);
1043 bna_rxf_ucast_cfg_reset(struct bna_rxf
*rxf
, enum bna_cleanup_type cleanup
)
1045 struct list_head
*qe
;
1046 struct bna_mac
*mac
;
1048 /* Throw away delete pending ucast entries */
1049 while (!list_empty(&rxf
->ucast_pending_del_q
)) {
1050 bfa_q_deq(&rxf
->ucast_pending_del_q
, &qe
);
1052 mac
= (struct bna_mac
*)qe
;
1053 if (cleanup
== BNA_SOFT_CLEANUP
)
1054 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
1056 bna_bfi_ucast_req(rxf
, mac
,
1057 BFI_ENET_H2I_MAC_UCAST_DEL_REQ
);
1058 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
1063 /* Move active ucast entries to pending_add_q */
1064 while (!list_empty(&rxf
->ucast_active_q
)) {
1065 bfa_q_deq(&rxf
->ucast_active_q
, &qe
);
1067 list_add_tail(qe
, &rxf
->ucast_pending_add_q
);
1068 if (cleanup
== BNA_HARD_CLEANUP
) {
1069 mac
= (struct bna_mac
*)qe
;
1070 bna_bfi_ucast_req(rxf
, mac
,
1071 BFI_ENET_H2I_MAC_UCAST_DEL_REQ
);
1076 if (rxf
->ucast_active_set
) {
1077 rxf
->ucast_pending_set
= 1;
1078 rxf
->ucast_active_set
= 0;
1079 if (cleanup
== BNA_HARD_CLEANUP
) {
1080 bna_bfi_ucast_req(rxf
, &rxf
->ucast_active_mac
,
1081 BFI_ENET_H2I_MAC_UCAST_CLR_REQ
);
1090 bna_rxf_promisc_cfg_apply(struct bna_rxf
*rxf
)
1092 struct bna
*bna
= rxf
->rx
->bna
;
1094 /* Enable/disable promiscuous mode */
1095 if (is_promisc_enable(rxf
->rxmode_pending
,
1096 rxf
->rxmode_pending_bitmask
)) {
1097 /* move promisc configuration from pending -> active */
1098 promisc_inactive(rxf
->rxmode_pending
,
1099 rxf
->rxmode_pending_bitmask
);
1100 rxf
->rxmode_active
|= BNA_RXMODE_PROMISC
;
1101 bna_bfi_rx_promisc_req(rxf
, BNA_STATUS_T_ENABLED
);
1103 } else if (is_promisc_disable(rxf
->rxmode_pending
,
1104 rxf
->rxmode_pending_bitmask
)) {
1105 /* move promisc configuration from pending -> active */
1106 promisc_inactive(rxf
->rxmode_pending
,
1107 rxf
->rxmode_pending_bitmask
);
1108 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
1109 bna
->promisc_rid
= BFI_INVALID_RID
;
1110 bna_bfi_rx_promisc_req(rxf
, BNA_STATUS_T_DISABLED
);
1118 bna_rxf_promisc_cfg_reset(struct bna_rxf
*rxf
, enum bna_cleanup_type cleanup
)
1120 struct bna
*bna
= rxf
->rx
->bna
;
1122 /* Clear pending promisc mode disable */
1123 if (is_promisc_disable(rxf
->rxmode_pending
,
1124 rxf
->rxmode_pending_bitmask
)) {
1125 promisc_inactive(rxf
->rxmode_pending
,
1126 rxf
->rxmode_pending_bitmask
);
1127 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
1128 bna
->promisc_rid
= BFI_INVALID_RID
;
1129 if (cleanup
== BNA_HARD_CLEANUP
) {
1130 bna_bfi_rx_promisc_req(rxf
, BNA_STATUS_T_DISABLED
);
1135 /* Move promisc mode config from active -> pending */
1136 if (rxf
->rxmode_active
& BNA_RXMODE_PROMISC
) {
1137 promisc_enable(rxf
->rxmode_pending
,
1138 rxf
->rxmode_pending_bitmask
);
1139 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
1140 if (cleanup
== BNA_HARD_CLEANUP
) {
1141 bna_bfi_rx_promisc_req(rxf
, BNA_STATUS_T_DISABLED
);
1150 bna_rxf_allmulti_cfg_apply(struct bna_rxf
*rxf
)
1152 /* Enable/disable allmulti mode */
1153 if (is_allmulti_enable(rxf
->rxmode_pending
,
1154 rxf
->rxmode_pending_bitmask
)) {
1155 /* move allmulti configuration from pending -> active */
1156 allmulti_inactive(rxf
->rxmode_pending
,
1157 rxf
->rxmode_pending_bitmask
);
1158 rxf
->rxmode_active
|= BNA_RXMODE_ALLMULTI
;
1159 bna_bfi_mcast_filter_req(rxf
, BNA_STATUS_T_DISABLED
);
1161 } else if (is_allmulti_disable(rxf
->rxmode_pending
,
1162 rxf
->rxmode_pending_bitmask
)) {
1163 /* move allmulti configuration from pending -> active */
1164 allmulti_inactive(rxf
->rxmode_pending
,
1165 rxf
->rxmode_pending_bitmask
);
1166 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
1167 bna_bfi_mcast_filter_req(rxf
, BNA_STATUS_T_ENABLED
);
1175 bna_rxf_allmulti_cfg_reset(struct bna_rxf
*rxf
, enum bna_cleanup_type cleanup
)
1177 /* Clear pending allmulti mode disable */
1178 if (is_allmulti_disable(rxf
->rxmode_pending
,
1179 rxf
->rxmode_pending_bitmask
)) {
1180 allmulti_inactive(rxf
->rxmode_pending
,
1181 rxf
->rxmode_pending_bitmask
);
1182 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
1183 if (cleanup
== BNA_HARD_CLEANUP
) {
1184 bna_bfi_mcast_filter_req(rxf
, BNA_STATUS_T_ENABLED
);
1189 /* Move allmulti mode config from active -> pending */
1190 if (rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
) {
1191 allmulti_enable(rxf
->rxmode_pending
,
1192 rxf
->rxmode_pending_bitmask
);
1193 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
1194 if (cleanup
== BNA_HARD_CLEANUP
) {
1195 bna_bfi_mcast_filter_req(rxf
, BNA_STATUS_T_ENABLED
);
1204 bna_rxf_promisc_enable(struct bna_rxf
*rxf
)
1206 struct bna
*bna
= rxf
->rx
->bna
;
1209 if (is_promisc_enable(rxf
->rxmode_pending
,
1210 rxf
->rxmode_pending_bitmask
) ||
1211 (rxf
->rxmode_active
& BNA_RXMODE_PROMISC
)) {
1212 /* Do nothing if pending enable or already enabled */
1213 } else if (is_promisc_disable(rxf
->rxmode_pending
,
1214 rxf
->rxmode_pending_bitmask
)) {
1215 /* Turn off pending disable command */
1216 promisc_inactive(rxf
->rxmode_pending
,
1217 rxf
->rxmode_pending_bitmask
);
1219 /* Schedule enable */
1220 promisc_enable(rxf
->rxmode_pending
,
1221 rxf
->rxmode_pending_bitmask
);
1222 bna
->promisc_rid
= rxf
->rx
->rid
;
1230 bna_rxf_promisc_disable(struct bna_rxf
*rxf
)
1232 struct bna
*bna
= rxf
->rx
->bna
;
1235 if (is_promisc_disable(rxf
->rxmode_pending
,
1236 rxf
->rxmode_pending_bitmask
) ||
1237 (!(rxf
->rxmode_active
& BNA_RXMODE_PROMISC
))) {
1238 /* Do nothing if pending disable or already disabled */
1239 } else if (is_promisc_enable(rxf
->rxmode_pending
,
1240 rxf
->rxmode_pending_bitmask
)) {
1241 /* Turn off pending enable command */
1242 promisc_inactive(rxf
->rxmode_pending
,
1243 rxf
->rxmode_pending_bitmask
);
1244 bna
->promisc_rid
= BFI_INVALID_RID
;
1245 } else if (rxf
->rxmode_active
& BNA_RXMODE_PROMISC
) {
1246 /* Schedule disable */
1247 promisc_disable(rxf
->rxmode_pending
,
1248 rxf
->rxmode_pending_bitmask
);
1256 bna_rxf_allmulti_enable(struct bna_rxf
*rxf
)
1260 if (is_allmulti_enable(rxf
->rxmode_pending
,
1261 rxf
->rxmode_pending_bitmask
) ||
1262 (rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
)) {
1263 /* Do nothing if pending enable or already enabled */
1264 } else if (is_allmulti_disable(rxf
->rxmode_pending
,
1265 rxf
->rxmode_pending_bitmask
)) {
1266 /* Turn off pending disable command */
1267 allmulti_inactive(rxf
->rxmode_pending
,
1268 rxf
->rxmode_pending_bitmask
);
1270 /* Schedule enable */
1271 allmulti_enable(rxf
->rxmode_pending
,
1272 rxf
->rxmode_pending_bitmask
);
1280 bna_rxf_allmulti_disable(struct bna_rxf
*rxf
)
1284 if (is_allmulti_disable(rxf
->rxmode_pending
,
1285 rxf
->rxmode_pending_bitmask
) ||
1286 (!(rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
))) {
1287 /* Do nothing if pending disable or already disabled */
1288 } else if (is_allmulti_enable(rxf
->rxmode_pending
,
1289 rxf
->rxmode_pending_bitmask
)) {
1290 /* Turn off pending enable command */
1291 allmulti_inactive(rxf
->rxmode_pending
,
1292 rxf
->rxmode_pending_bitmask
);
1293 } else if (rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
) {
1294 /* Schedule disable */
1295 allmulti_disable(rxf
->rxmode_pending
,
1296 rxf
->rxmode_pending_bitmask
);
1304 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf
*rxf
)
1306 if (rxf
->vlan_strip_pending
) {
1307 rxf
->vlan_strip_pending
= false;
1308 bna_bfi_vlan_strip_enable(rxf
);
1319 #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1320 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1322 #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1323 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1325 #define call_rx_stop_cbfn(rx) \
1327 if ((rx)->stop_cbfn) { \
1328 void (*cbfn)(void *, struct bna_rx *); \
1330 cbfn = (rx)->stop_cbfn; \
1331 cbarg = (rx)->stop_cbarg; \
1332 (rx)->stop_cbfn = NULL; \
1333 (rx)->stop_cbarg = NULL; \
1338 #define call_rx_stall_cbfn(rx) \
1340 if ((rx)->rx_stall_cbfn) \
1341 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
1344 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
1346 struct bna_dma_addr cur_q_addr = \
1347 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
1348 (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
1349 (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
1350 (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
1351 (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
1352 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1353 (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1356 static void bna_bfi_rx_enet_start(struct bna_rx
*rx
);
1357 static void bna_rx_enet_stop(struct bna_rx
*rx
);
1358 static void bna_rx_mod_cb_rx_stopped(void *arg
, struct bna_rx
*rx
);
1360 bfa_fsm_state_decl(bna_rx
, stopped
,
1361 struct bna_rx
, enum bna_rx_event
);
1362 bfa_fsm_state_decl(bna_rx
, start_wait
,
1363 struct bna_rx
, enum bna_rx_event
);
1364 bfa_fsm_state_decl(bna_rx
, rxf_start_wait
,
1365 struct bna_rx
, enum bna_rx_event
);
1366 bfa_fsm_state_decl(bna_rx
, started
,
1367 struct bna_rx
, enum bna_rx_event
);
1368 bfa_fsm_state_decl(bna_rx
, rxf_stop_wait
,
1369 struct bna_rx
, enum bna_rx_event
);
1370 bfa_fsm_state_decl(bna_rx
, stop_wait
,
1371 struct bna_rx
, enum bna_rx_event
);
1372 bfa_fsm_state_decl(bna_rx
, cleanup_wait
,
1373 struct bna_rx
, enum bna_rx_event
);
1374 bfa_fsm_state_decl(bna_rx
, failed
,
1375 struct bna_rx
, enum bna_rx_event
);
1376 bfa_fsm_state_decl(bna_rx
, quiesce_wait
,
1377 struct bna_rx
, enum bna_rx_event
);
1379 static void bna_rx_sm_stopped_entry(struct bna_rx
*rx
)
1381 call_rx_stop_cbfn(rx
);
1384 static void bna_rx_sm_stopped(struct bna_rx
*rx
,
1385 enum bna_rx_event event
)
1389 bfa_fsm_set_state(rx
, bna_rx_sm_start_wait
);
1393 call_rx_stop_cbfn(rx
);
1401 bfa_sm_fault(event
);
1406 static void bna_rx_sm_start_wait_entry(struct bna_rx
*rx
)
1408 bna_bfi_rx_enet_start(rx
);
1412 bna_rx_sm_stop_wait_entry(struct bna_rx
*rx
)
1417 bna_rx_sm_stop_wait(struct bna_rx
*rx
, enum bna_rx_event event
)
1422 bfa_fsm_set_state(rx
, bna_rx_sm_cleanup_wait
);
1423 rx
->rx_cleanup_cbfn(rx
->bna
->bnad
, rx
);
1427 bna_rx_enet_stop(rx
);
1431 bfa_sm_fault(event
);
1436 static void bna_rx_sm_start_wait(struct bna_rx
*rx
,
1437 enum bna_rx_event event
)
1441 bfa_fsm_set_state(rx
, bna_rx_sm_stop_wait
);
1445 bfa_fsm_set_state(rx
, bna_rx_sm_stopped
);
1449 bfa_fsm_set_state(rx
, bna_rx_sm_rxf_start_wait
);
1453 bfa_sm_fault(event
);
1458 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx
*rx
)
1460 rx
->rx_post_cbfn(rx
->bna
->bnad
, rx
);
1461 bna_rxf_start(&rx
->rxf
);
1465 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx
*rx
)
1470 bna_rx_sm_rxf_stop_wait(struct bna_rx
*rx
, enum bna_rx_event event
)
1474 bfa_fsm_set_state(rx
, bna_rx_sm_cleanup_wait
);
1475 bna_rxf_fail(&rx
->rxf
);
1476 call_rx_stall_cbfn(rx
);
1477 rx
->rx_cleanup_cbfn(rx
->bna
->bnad
, rx
);
1480 case RX_E_RXF_STARTED
:
1481 bna_rxf_stop(&rx
->rxf
);
1484 case RX_E_RXF_STOPPED
:
1485 bfa_fsm_set_state(rx
, bna_rx_sm_stop_wait
);
1486 call_rx_stall_cbfn(rx
);
1487 bna_rx_enet_stop(rx
);
1491 bfa_sm_fault(event
);
1498 bna_rx_sm_started_entry(struct bna_rx
*rx
)
1500 struct bna_rxp
*rxp
;
1501 struct list_head
*qe_rxp
;
1502 int is_regular
= (rx
->type
== BNA_RX_T_REGULAR
);
1505 list_for_each(qe_rxp
, &rx
->rxp_q
) {
1506 rxp
= (struct bna_rxp
*)qe_rxp
;
1507 bna_ib_start(rx
->bna
, &rxp
->cq
.ib
, is_regular
);
1510 bna_ethport_cb_rx_started(&rx
->bna
->ethport
);
1514 bna_rx_sm_started(struct bna_rx
*rx
, enum bna_rx_event event
)
1518 bfa_fsm_set_state(rx
, bna_rx_sm_rxf_stop_wait
);
1519 bna_ethport_cb_rx_stopped(&rx
->bna
->ethport
);
1520 bna_rxf_stop(&rx
->rxf
);
1524 bfa_fsm_set_state(rx
, bna_rx_sm_failed
);
1525 bna_ethport_cb_rx_stopped(&rx
->bna
->ethport
);
1526 bna_rxf_fail(&rx
->rxf
);
1527 call_rx_stall_cbfn(rx
);
1528 rx
->rx_cleanup_cbfn(rx
->bna
->bnad
, rx
);
1532 bfa_sm_fault(event
);
1537 static void bna_rx_sm_rxf_start_wait(struct bna_rx
*rx
,
1538 enum bna_rx_event event
)
1542 bfa_fsm_set_state(rx
, bna_rx_sm_rxf_stop_wait
);
1546 bfa_fsm_set_state(rx
, bna_rx_sm_failed
);
1547 bna_rxf_fail(&rx
->rxf
);
1548 call_rx_stall_cbfn(rx
);
1549 rx
->rx_cleanup_cbfn(rx
->bna
->bnad
, rx
);
1552 case RX_E_RXF_STARTED
:
1553 bfa_fsm_set_state(rx
, bna_rx_sm_started
);
1557 bfa_sm_fault(event
);
1563 bna_rx_sm_cleanup_wait_entry(struct bna_rx
*rx
)
1568 bna_rx_sm_cleanup_wait(struct bna_rx
*rx
, enum bna_rx_event event
)
1572 case RX_E_RXF_STOPPED
:
1576 case RX_E_CLEANUP_DONE
:
1577 bfa_fsm_set_state(rx
, bna_rx_sm_stopped
);
1581 bfa_sm_fault(event
);
1587 bna_rx_sm_failed_entry(struct bna_rx
*rx
)
1592 bna_rx_sm_failed(struct bna_rx
*rx
, enum bna_rx_event event
)
1596 bfa_fsm_set_state(rx
, bna_rx_sm_quiesce_wait
);
1600 bfa_fsm_set_state(rx
, bna_rx_sm_cleanup_wait
);
1604 case RX_E_RXF_STARTED
:
1605 case RX_E_RXF_STOPPED
:
1609 case RX_E_CLEANUP_DONE
:
1610 bfa_fsm_set_state(rx
, bna_rx_sm_stopped
);
1614 bfa_sm_fault(event
);
1619 bna_rx_sm_quiesce_wait_entry(struct bna_rx
*rx
)
1624 bna_rx_sm_quiesce_wait(struct bna_rx
*rx
, enum bna_rx_event event
)
1628 bfa_fsm_set_state(rx
, bna_rx_sm_cleanup_wait
);
1632 bfa_fsm_set_state(rx
, bna_rx_sm_failed
);
1635 case RX_E_CLEANUP_DONE
:
1636 bfa_fsm_set_state(rx
, bna_rx_sm_start_wait
);
1640 bfa_sm_fault(event
);
1646 bna_bfi_rx_enet_start(struct bna_rx
*rx
)
1648 struct bfi_enet_rx_cfg_req
*cfg_req
= &rx
->bfi_enet_cmd
.cfg_req
;
1649 struct bna_rxp
*rxp
= NULL
;
1650 struct bna_rxq
*q0
= NULL
, *q1
= NULL
;
1651 struct list_head
*rxp_qe
;
1654 bfi_msgq_mhdr_set(cfg_req
->mh
, BFI_MC_ENET
,
1655 BFI_ENET_H2I_RX_CFG_SET_REQ
, 0, rx
->rid
);
1656 cfg_req
->mh
.num_entries
= htons(
1657 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req
)));
1659 cfg_req
->num_queue_sets
= rx
->num_paths
;
1660 for (i
= 0, rxp_qe
= bfa_q_first(&rx
->rxp_q
);
1662 i
++, rxp_qe
= bfa_q_next(rxp_qe
)) {
1663 rxp
= (struct bna_rxp
*)rxp_qe
;
1665 GET_RXQS(rxp
, q0
, q1
);
1666 switch (rxp
->type
) {
1670 bfi_enet_datapath_q_init(&cfg_req
->q_cfg
[i
].qs
.q
,
1672 cfg_req
->q_cfg
[i
].qs
.rx_buffer_size
=
1673 htons((u16
)q1
->buffer_size
);
1676 case BNA_RXP_SINGLE
:
1677 /* Large/Single RxQ */
1678 bfi_enet_datapath_q_init(&cfg_req
->q_cfg
[i
].ql
.q
,
1681 bna_enet_mtu_get(&rx
->bna
->enet
);
1682 cfg_req
->q_cfg
[i
].ql
.rx_buffer_size
=
1683 htons((u16
)q0
->buffer_size
);
1690 bfi_enet_datapath_q_init(&cfg_req
->q_cfg
[i
].cq
.q
,
1693 cfg_req
->q_cfg
[i
].ib
.index_addr
.a32
.addr_lo
=
1694 rxp
->cq
.ib
.ib_seg_host_addr
.lsb
;
1695 cfg_req
->q_cfg
[i
].ib
.index_addr
.a32
.addr_hi
=
1696 rxp
->cq
.ib
.ib_seg_host_addr
.msb
;
1697 cfg_req
->q_cfg
[i
].ib
.intr
.msix_index
=
1698 htons((u16
)rxp
->cq
.ib
.intr_vector
);
1701 cfg_req
->ib_cfg
.int_pkt_dma
= BNA_STATUS_T_DISABLED
;
1702 cfg_req
->ib_cfg
.int_enabled
= BNA_STATUS_T_ENABLED
;
1703 cfg_req
->ib_cfg
.int_pkt_enabled
= BNA_STATUS_T_DISABLED
;
1704 cfg_req
->ib_cfg
.continuous_coalescing
= BNA_STATUS_T_DISABLED
;
1705 cfg_req
->ib_cfg
.msix
= (rxp
->cq
.ib
.intr_type
== BNA_INTR_T_MSIX
)
1706 ? BNA_STATUS_T_ENABLED
:
1707 BNA_STATUS_T_DISABLED
;
1708 cfg_req
->ib_cfg
.coalescing_timeout
=
1709 htonl((u32
)rxp
->cq
.ib
.coalescing_timeo
);
1710 cfg_req
->ib_cfg
.inter_pkt_timeout
=
1711 htonl((u32
)rxp
->cq
.ib
.interpkt_timeo
);
1712 cfg_req
->ib_cfg
.inter_pkt_count
= (u8
)rxp
->cq
.ib
.interpkt_count
;
1714 switch (rxp
->type
) {
1716 cfg_req
->rx_cfg
.rxq_type
= BFI_ENET_RXQ_LARGE_SMALL
;
1720 cfg_req
->rx_cfg
.rxq_type
= BFI_ENET_RXQ_HDS
;
1721 cfg_req
->rx_cfg
.hds
.type
= rx
->hds_cfg
.hdr_type
;
1722 cfg_req
->rx_cfg
.hds
.force_offset
= rx
->hds_cfg
.forced_offset
;
1723 cfg_req
->rx_cfg
.hds
.max_header_size
= rx
->hds_cfg
.forced_offset
;
1726 case BNA_RXP_SINGLE
:
1727 cfg_req
->rx_cfg
.rxq_type
= BFI_ENET_RXQ_SINGLE
;
1733 cfg_req
->rx_cfg
.strip_vlan
= rx
->rxf
.vlan_strip_status
;
1735 bfa_msgq_cmd_set(&rx
->msgq_cmd
, NULL
, NULL
,
1736 sizeof(struct bfi_enet_rx_cfg_req
), &cfg_req
->mh
);
1737 bfa_msgq_cmd_post(&rx
->bna
->msgq
, &rx
->msgq_cmd
);
1741 bna_bfi_rx_enet_stop(struct bna_rx
*rx
)
1743 struct bfi_enet_req
*req
= &rx
->bfi_enet_cmd
.req
;
1745 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
,
1746 BFI_ENET_H2I_RX_CFG_CLR_REQ
, 0, rx
->rid
);
1747 req
->mh
.num_entries
= htons(
1748 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req
)));
1749 bfa_msgq_cmd_set(&rx
->msgq_cmd
, NULL
, NULL
, sizeof(struct bfi_enet_req
),
1751 bfa_msgq_cmd_post(&rx
->bna
->msgq
, &rx
->msgq_cmd
);
1755 bna_rx_enet_stop(struct bna_rx
*rx
)
1757 struct bna_rxp
*rxp
;
1758 struct list_head
*qe_rxp
;
1761 list_for_each(qe_rxp
, &rx
->rxp_q
) {
1762 rxp
= (struct bna_rxp
*)qe_rxp
;
1763 bna_ib_stop(rx
->bna
, &rxp
->cq
.ib
);
1766 bna_bfi_rx_enet_stop(rx
);
1770 bna_rx_res_check(struct bna_rx_mod
*rx_mod
, struct bna_rx_config
*rx_cfg
)
1772 if ((rx_mod
->rx_free_count
== 0) ||
1773 (rx_mod
->rxp_free_count
== 0) ||
1774 (rx_mod
->rxq_free_count
== 0))
1777 if (rx_cfg
->rxp_type
== BNA_RXP_SINGLE
) {
1778 if ((rx_mod
->rxp_free_count
< rx_cfg
->num_paths
) ||
1779 (rx_mod
->rxq_free_count
< rx_cfg
->num_paths
))
1782 if ((rx_mod
->rxp_free_count
< rx_cfg
->num_paths
) ||
1783 (rx_mod
->rxq_free_count
< (2 * rx_cfg
->num_paths
)))
1790 static struct bna_rxq
*
1791 bna_rxq_get(struct bna_rx_mod
*rx_mod
)
1793 struct bna_rxq
*rxq
= NULL
;
1794 struct list_head
*qe
= NULL
;
1796 bfa_q_deq(&rx_mod
->rxq_free_q
, &qe
);
1797 rx_mod
->rxq_free_count
--;
1798 rxq
= (struct bna_rxq
*)qe
;
1799 bfa_q_qe_init(&rxq
->qe
);
1805 bna_rxq_put(struct bna_rx_mod
*rx_mod
, struct bna_rxq
*rxq
)
1807 bfa_q_qe_init(&rxq
->qe
);
1808 list_add_tail(&rxq
->qe
, &rx_mod
->rxq_free_q
);
1809 rx_mod
->rxq_free_count
++;
1812 static struct bna_rxp
*
1813 bna_rxp_get(struct bna_rx_mod
*rx_mod
)
1815 struct list_head
*qe
= NULL
;
1816 struct bna_rxp
*rxp
= NULL
;
1818 bfa_q_deq(&rx_mod
->rxp_free_q
, &qe
);
1819 rx_mod
->rxp_free_count
--;
1820 rxp
= (struct bna_rxp
*)qe
;
1821 bfa_q_qe_init(&rxp
->qe
);
1827 bna_rxp_put(struct bna_rx_mod
*rx_mod
, struct bna_rxp
*rxp
)
1829 bfa_q_qe_init(&rxp
->qe
);
1830 list_add_tail(&rxp
->qe
, &rx_mod
->rxp_free_q
);
1831 rx_mod
->rxp_free_count
++;
1834 static struct bna_rx
*
1835 bna_rx_get(struct bna_rx_mod
*rx_mod
, enum bna_rx_type type
)
1837 struct list_head
*qe
= NULL
;
1838 struct bna_rx
*rx
= NULL
;
1840 if (type
== BNA_RX_T_REGULAR
) {
1841 bfa_q_deq(&rx_mod
->rx_free_q
, &qe
);
1843 bfa_q_deq_tail(&rx_mod
->rx_free_q
, &qe
);
1845 rx_mod
->rx_free_count
--;
1846 rx
= (struct bna_rx
*)qe
;
1847 bfa_q_qe_init(&rx
->qe
);
1848 list_add_tail(&rx
->qe
, &rx_mod
->rx_active_q
);
1855 bna_rx_put(struct bna_rx_mod
*rx_mod
, struct bna_rx
*rx
)
1857 struct list_head
*prev_qe
= NULL
;
1858 struct list_head
*qe
;
1860 bfa_q_qe_init(&rx
->qe
);
1862 list_for_each(qe
, &rx_mod
->rx_free_q
) {
1863 if (((struct bna_rx
*)qe
)->rid
< rx
->rid
)
1869 if (prev_qe
== NULL
) {
1870 /* This is the first entry */
1871 bfa_q_enq_head(&rx_mod
->rx_free_q
, &rx
->qe
);
1872 } else if (bfa_q_next(prev_qe
) == &rx_mod
->rx_free_q
) {
1873 /* This is the last entry */
1874 list_add_tail(&rx
->qe
, &rx_mod
->rx_free_q
);
1876 /* Somewhere in the middle */
1877 bfa_q_next(&rx
->qe
) = bfa_q_next(prev_qe
);
1878 bfa_q_prev(&rx
->qe
) = prev_qe
;
1879 bfa_q_next(prev_qe
) = &rx
->qe
;
1880 bfa_q_prev(bfa_q_next(&rx
->qe
)) = &rx
->qe
;
1883 rx_mod
->rx_free_count
++;
1887 bna_rxp_add_rxqs(struct bna_rxp
*rxp
, struct bna_rxq
*q0
,
1890 switch (rxp
->type
) {
1891 case BNA_RXP_SINGLE
:
1892 rxp
->rxq
.single
.only
= q0
;
1893 rxp
->rxq
.single
.reserved
= NULL
;
1896 rxp
->rxq
.slr
.large
= q0
;
1897 rxp
->rxq
.slr
.small
= q1
;
1900 rxp
->rxq
.hds
.data
= q0
;
1901 rxp
->rxq
.hds
.hdr
= q1
;
1909 bna_rxq_qpt_setup(struct bna_rxq
*rxq
,
1910 struct bna_rxp
*rxp
,
1913 struct bna_mem_descr
*qpt_mem
,
1914 struct bna_mem_descr
*swqpt_mem
,
1915 struct bna_mem_descr
*page_mem
)
1919 rxq
->qpt
.hw_qpt_ptr
.lsb
= qpt_mem
->dma
.lsb
;
1920 rxq
->qpt
.hw_qpt_ptr
.msb
= qpt_mem
->dma
.msb
;
1921 rxq
->qpt
.kv_qpt_ptr
= qpt_mem
->kva
;
1922 rxq
->qpt
.page_count
= page_count
;
1923 rxq
->qpt
.page_size
= page_size
;
1925 rxq
->rcb
->sw_qpt
= (void **) swqpt_mem
->kva
;
1927 for (i
= 0; i
< rxq
->qpt
.page_count
; i
++) {
1928 rxq
->rcb
->sw_qpt
[i
] = page_mem
[i
].kva
;
1929 ((struct bna_dma_addr
*)rxq
->qpt
.kv_qpt_ptr
)[i
].lsb
=
1930 page_mem
[i
].dma
.lsb
;
1931 ((struct bna_dma_addr
*)rxq
->qpt
.kv_qpt_ptr
)[i
].msb
=
1932 page_mem
[i
].dma
.msb
;
1937 bna_rxp_cqpt_setup(struct bna_rxp
*rxp
,
1940 struct bna_mem_descr
*qpt_mem
,
1941 struct bna_mem_descr
*swqpt_mem
,
1942 struct bna_mem_descr
*page_mem
)
1946 rxp
->cq
.qpt
.hw_qpt_ptr
.lsb
= qpt_mem
->dma
.lsb
;
1947 rxp
->cq
.qpt
.hw_qpt_ptr
.msb
= qpt_mem
->dma
.msb
;
1948 rxp
->cq
.qpt
.kv_qpt_ptr
= qpt_mem
->kva
;
1949 rxp
->cq
.qpt
.page_count
= page_count
;
1950 rxp
->cq
.qpt
.page_size
= page_size
;
1952 rxp
->cq
.ccb
->sw_qpt
= (void **) swqpt_mem
->kva
;
1954 for (i
= 0; i
< rxp
->cq
.qpt
.page_count
; i
++) {
1955 rxp
->cq
.ccb
->sw_qpt
[i
] = page_mem
[i
].kva
;
1957 ((struct bna_dma_addr
*)rxp
->cq
.qpt
.kv_qpt_ptr
)[i
].lsb
=
1958 page_mem
[i
].dma
.lsb
;
1959 ((struct bna_dma_addr
*)rxp
->cq
.qpt
.kv_qpt_ptr
)[i
].msb
=
1960 page_mem
[i
].dma
.msb
;
1965 bna_rx_mod_cb_rx_stopped(void *arg
, struct bna_rx
*rx
)
1967 struct bna_rx_mod
*rx_mod
= (struct bna_rx_mod
*)arg
;
1969 bfa_wc_down(&rx_mod
->rx_stop_wc
);
1973 bna_rx_mod_cb_rx_stopped_all(void *arg
)
1975 struct bna_rx_mod
*rx_mod
= (struct bna_rx_mod
*)arg
;
1977 if (rx_mod
->stop_cbfn
)
1978 rx_mod
->stop_cbfn(&rx_mod
->bna
->enet
);
1979 rx_mod
->stop_cbfn
= NULL
;
1983 bna_rx_start(struct bna_rx
*rx
)
1985 rx
->rx_flags
|= BNA_RX_F_ENET_STARTED
;
1986 if (rx
->rx_flags
& BNA_RX_F_ENABLED
)
1987 bfa_fsm_send_event(rx
, RX_E_START
);
1991 bna_rx_stop(struct bna_rx
*rx
)
1993 rx
->rx_flags
&= ~BNA_RX_F_ENET_STARTED
;
1994 if (rx
->fsm
== (bfa_fsm_t
) bna_rx_sm_stopped
)
1995 bna_rx_mod_cb_rx_stopped(&rx
->bna
->rx_mod
, rx
);
1997 rx
->stop_cbfn
= bna_rx_mod_cb_rx_stopped
;
1998 rx
->stop_cbarg
= &rx
->bna
->rx_mod
;
1999 bfa_fsm_send_event(rx
, RX_E_STOP
);
2004 bna_rx_fail(struct bna_rx
*rx
)
2006 /* Indicate Enet is not enabled, and failed */
2007 rx
->rx_flags
&= ~BNA_RX_F_ENET_STARTED
;
2008 bfa_fsm_send_event(rx
, RX_E_FAIL
);
2012 bna_rx_mod_start(struct bna_rx_mod
*rx_mod
, enum bna_rx_type type
)
2015 struct list_head
*qe
;
2017 rx_mod
->flags
|= BNA_RX_MOD_F_ENET_STARTED
;
2018 if (type
== BNA_RX_T_LOOPBACK
)
2019 rx_mod
->flags
|= BNA_RX_MOD_F_ENET_LOOPBACK
;
2021 list_for_each(qe
, &rx_mod
->rx_active_q
) {
2022 rx
= (struct bna_rx
*)qe
;
2023 if (rx
->type
== type
)
2029 bna_rx_mod_stop(struct bna_rx_mod
*rx_mod
, enum bna_rx_type type
)
2032 struct list_head
*qe
;
2034 rx_mod
->flags
&= ~BNA_RX_MOD_F_ENET_STARTED
;
2035 rx_mod
->flags
&= ~BNA_RX_MOD_F_ENET_LOOPBACK
;
2037 rx_mod
->stop_cbfn
= bna_enet_cb_rx_stopped
;
2039 bfa_wc_init(&rx_mod
->rx_stop_wc
, bna_rx_mod_cb_rx_stopped_all
, rx_mod
);
2041 list_for_each(qe
, &rx_mod
->rx_active_q
) {
2042 rx
= (struct bna_rx
*)qe
;
2043 if (rx
->type
== type
) {
2044 bfa_wc_up(&rx_mod
->rx_stop_wc
);
2049 bfa_wc_wait(&rx_mod
->rx_stop_wc
);
2053 bna_rx_mod_fail(struct bna_rx_mod
*rx_mod
)
2056 struct list_head
*qe
;
2058 rx_mod
->flags
&= ~BNA_RX_MOD_F_ENET_STARTED
;
2059 rx_mod
->flags
&= ~BNA_RX_MOD_F_ENET_LOOPBACK
;
2061 list_for_each(qe
, &rx_mod
->rx_active_q
) {
2062 rx
= (struct bna_rx
*)qe
;
2067 void bna_rx_mod_init(struct bna_rx_mod
*rx_mod
, struct bna
*bna
,
2068 struct bna_res_info
*res_info
)
2071 struct bna_rx
*rx_ptr
;
2072 struct bna_rxp
*rxp_ptr
;
2073 struct bna_rxq
*rxq_ptr
;
2078 rx_mod
->rx
= (struct bna_rx
*)
2079 res_info
[BNA_MOD_RES_MEM_T_RX_ARRAY
].res_u
.mem_info
.mdl
[0].kva
;
2080 rx_mod
->rxp
= (struct bna_rxp
*)
2081 res_info
[BNA_MOD_RES_MEM_T_RXP_ARRAY
].res_u
.mem_info
.mdl
[0].kva
;
2082 rx_mod
->rxq
= (struct bna_rxq
*)
2083 res_info
[BNA_MOD_RES_MEM_T_RXQ_ARRAY
].res_u
.mem_info
.mdl
[0].kva
;
2085 /* Initialize the queues */
2086 INIT_LIST_HEAD(&rx_mod
->rx_free_q
);
2087 rx_mod
->rx_free_count
= 0;
2088 INIT_LIST_HEAD(&rx_mod
->rxq_free_q
);
2089 rx_mod
->rxq_free_count
= 0;
2090 INIT_LIST_HEAD(&rx_mod
->rxp_free_q
);
2091 rx_mod
->rxp_free_count
= 0;
2092 INIT_LIST_HEAD(&rx_mod
->rx_active_q
);
2094 /* Build RX queues */
2095 for (index
= 0; index
< bna
->ioceth
.attr
.num_rxp
; index
++) {
2096 rx_ptr
= &rx_mod
->rx
[index
];
2098 bfa_q_qe_init(&rx_ptr
->qe
);
2099 INIT_LIST_HEAD(&rx_ptr
->rxp_q
);
2101 rx_ptr
->rid
= index
;
2102 rx_ptr
->stop_cbfn
= NULL
;
2103 rx_ptr
->stop_cbarg
= NULL
;
2105 list_add_tail(&rx_ptr
->qe
, &rx_mod
->rx_free_q
);
2106 rx_mod
->rx_free_count
++;
2109 /* build RX-path queue */
2110 for (index
= 0; index
< bna
->ioceth
.attr
.num_rxp
; index
++) {
2111 rxp_ptr
= &rx_mod
->rxp
[index
];
2112 bfa_q_qe_init(&rxp_ptr
->qe
);
2113 list_add_tail(&rxp_ptr
->qe
, &rx_mod
->rxp_free_q
);
2114 rx_mod
->rxp_free_count
++;
2117 /* build RXQ queue */
2118 for (index
= 0; index
< (bna
->ioceth
.attr
.num_rxp
* 2); index
++) {
2119 rxq_ptr
= &rx_mod
->rxq
[index
];
2120 bfa_q_qe_init(&rxq_ptr
->qe
);
2121 list_add_tail(&rxq_ptr
->qe
, &rx_mod
->rxq_free_q
);
2122 rx_mod
->rxq_free_count
++;
2127 bna_rx_mod_uninit(struct bna_rx_mod
*rx_mod
)
2129 struct list_head
*qe
;
2133 list_for_each(qe
, &rx_mod
->rx_free_q
)
2137 list_for_each(qe
, &rx_mod
->rxp_free_q
)
2141 list_for_each(qe
, &rx_mod
->rxq_free_q
)
2148 bna_bfi_rx_enet_start_rsp(struct bna_rx
*rx
, struct bfi_msgq_mhdr
*msghdr
)
2150 struct bfi_enet_rx_cfg_rsp
*cfg_rsp
= &rx
->bfi_enet_cmd
.cfg_rsp
;
2151 struct bna_rxp
*rxp
= NULL
;
2152 struct bna_rxq
*q0
= NULL
, *q1
= NULL
;
2153 struct list_head
*rxp_qe
;
2156 bfa_msgq_rsp_copy(&rx
->bna
->msgq
, (u8
*)cfg_rsp
,
2157 sizeof(struct bfi_enet_rx_cfg_rsp
));
2159 rx
->hw_id
= cfg_rsp
->hw_id
;
2161 for (i
= 0, rxp_qe
= bfa_q_first(&rx
->rxp_q
);
2163 i
++, rxp_qe
= bfa_q_next(rxp_qe
)) {
2164 rxp
= (struct bna_rxp
*)rxp_qe
;
2165 GET_RXQS(rxp
, q0
, q1
);
2167 /* Setup doorbells */
2168 rxp
->cq
.ccb
->i_dbell
->doorbell_addr
=
2169 rx
->bna
->pcidev
.pci_bar_kva
2170 + ntohl(cfg_rsp
->q_handles
[i
].i_dbell
);
2171 rxp
->hw_id
= cfg_rsp
->q_handles
[i
].hw_cqid
;
2173 rx
->bna
->pcidev
.pci_bar_kva
2174 + ntohl(cfg_rsp
->q_handles
[i
].ql_dbell
);
2175 q0
->hw_id
= cfg_rsp
->q_handles
[i
].hw_lqid
;
2178 rx
->bna
->pcidev
.pci_bar_kva
2179 + ntohl(cfg_rsp
->q_handles
[i
].qs_dbell
);
2180 q1
->hw_id
= cfg_rsp
->q_handles
[i
].hw_sqid
;
2183 /* Initialize producer/consumer indexes */
2184 (*rxp
->cq
.ccb
->hw_producer_index
) = 0;
2185 rxp
->cq
.ccb
->producer_index
= 0;
2186 q0
->rcb
->producer_index
= q0
->rcb
->consumer_index
= 0;
2188 q1
->rcb
->producer_index
= q1
->rcb
->consumer_index
= 0;
2191 bfa_fsm_send_event(rx
, RX_E_STARTED
);
2195 bna_bfi_rx_enet_stop_rsp(struct bna_rx
*rx
, struct bfi_msgq_mhdr
*msghdr
)
2197 bfa_fsm_send_event(rx
, RX_E_STOPPED
);
2201 bna_rx_res_req(struct bna_rx_config
*q_cfg
, struct bna_res_info
*res_info
)
2203 u32 cq_size
, hq_size
, dq_size
;
2204 u32 cpage_count
, hpage_count
, dpage_count
;
2205 struct bna_mem_info
*mem_info
;
2210 dq_depth
= q_cfg
->q_depth
;
2211 hq_depth
= ((q_cfg
->rxp_type
== BNA_RXP_SINGLE
) ? 0 : q_cfg
->q_depth
);
2212 cq_depth
= dq_depth
+ hq_depth
;
2214 BNA_TO_POWER_OF_2_HIGH(cq_depth
);
2215 cq_size
= cq_depth
* BFI_CQ_WI_SIZE
;
2216 cq_size
= ALIGN(cq_size
, PAGE_SIZE
);
2217 cpage_count
= SIZE_TO_PAGES(cq_size
);
2219 BNA_TO_POWER_OF_2_HIGH(dq_depth
);
2220 dq_size
= dq_depth
* BFI_RXQ_WI_SIZE
;
2221 dq_size
= ALIGN(dq_size
, PAGE_SIZE
);
2222 dpage_count
= SIZE_TO_PAGES(dq_size
);
2224 if (BNA_RXP_SINGLE
!= q_cfg
->rxp_type
) {
2225 BNA_TO_POWER_OF_2_HIGH(hq_depth
);
2226 hq_size
= hq_depth
* BFI_RXQ_WI_SIZE
;
2227 hq_size
= ALIGN(hq_size
, PAGE_SIZE
);
2228 hpage_count
= SIZE_TO_PAGES(hq_size
);
2232 res_info
[BNA_RX_RES_MEM_T_CCB
].res_type
= BNA_RES_T_MEM
;
2233 mem_info
= &res_info
[BNA_RX_RES_MEM_T_CCB
].res_u
.mem_info
;
2234 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2235 mem_info
->len
= sizeof(struct bna_ccb
);
2236 mem_info
->num
= q_cfg
->num_paths
;
2238 res_info
[BNA_RX_RES_MEM_T_RCB
].res_type
= BNA_RES_T_MEM
;
2239 mem_info
= &res_info
[BNA_RX_RES_MEM_T_RCB
].res_u
.mem_info
;
2240 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2241 mem_info
->len
= sizeof(struct bna_rcb
);
2242 mem_info
->num
= BNA_GET_RXQS(q_cfg
);
2244 res_info
[BNA_RX_RES_MEM_T_CQPT
].res_type
= BNA_RES_T_MEM
;
2245 mem_info
= &res_info
[BNA_RX_RES_MEM_T_CQPT
].res_u
.mem_info
;
2246 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2247 mem_info
->len
= cpage_count
* sizeof(struct bna_dma_addr
);
2248 mem_info
->num
= q_cfg
->num_paths
;
2250 res_info
[BNA_RX_RES_MEM_T_CSWQPT
].res_type
= BNA_RES_T_MEM
;
2251 mem_info
= &res_info
[BNA_RX_RES_MEM_T_CSWQPT
].res_u
.mem_info
;
2252 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2253 mem_info
->len
= cpage_count
* sizeof(void *);
2254 mem_info
->num
= q_cfg
->num_paths
;
2256 res_info
[BNA_RX_RES_MEM_T_CQPT_PAGE
].res_type
= BNA_RES_T_MEM
;
2257 mem_info
= &res_info
[BNA_RX_RES_MEM_T_CQPT_PAGE
].res_u
.mem_info
;
2258 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2259 mem_info
->len
= PAGE_SIZE
;
2260 mem_info
->num
= cpage_count
* q_cfg
->num_paths
;
2262 res_info
[BNA_RX_RES_MEM_T_DQPT
].res_type
= BNA_RES_T_MEM
;
2263 mem_info
= &res_info
[BNA_RX_RES_MEM_T_DQPT
].res_u
.mem_info
;
2264 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2265 mem_info
->len
= dpage_count
* sizeof(struct bna_dma_addr
);
2266 mem_info
->num
= q_cfg
->num_paths
;
2268 res_info
[BNA_RX_RES_MEM_T_DSWQPT
].res_type
= BNA_RES_T_MEM
;
2269 mem_info
= &res_info
[BNA_RX_RES_MEM_T_DSWQPT
].res_u
.mem_info
;
2270 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2271 mem_info
->len
= dpage_count
* sizeof(void *);
2272 mem_info
->num
= q_cfg
->num_paths
;
2274 res_info
[BNA_RX_RES_MEM_T_DPAGE
].res_type
= BNA_RES_T_MEM
;
2275 mem_info
= &res_info
[BNA_RX_RES_MEM_T_DPAGE
].res_u
.mem_info
;
2276 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2277 mem_info
->len
= PAGE_SIZE
;
2278 mem_info
->num
= dpage_count
* q_cfg
->num_paths
;
2280 res_info
[BNA_RX_RES_MEM_T_HQPT
].res_type
= BNA_RES_T_MEM
;
2281 mem_info
= &res_info
[BNA_RX_RES_MEM_T_HQPT
].res_u
.mem_info
;
2282 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2283 mem_info
->len
= hpage_count
* sizeof(struct bna_dma_addr
);
2284 mem_info
->num
= (hpage_count
? q_cfg
->num_paths
: 0);
2286 res_info
[BNA_RX_RES_MEM_T_HSWQPT
].res_type
= BNA_RES_T_MEM
;
2287 mem_info
= &res_info
[BNA_RX_RES_MEM_T_HSWQPT
].res_u
.mem_info
;
2288 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2289 mem_info
->len
= hpage_count
* sizeof(void *);
2290 mem_info
->num
= (hpage_count
? q_cfg
->num_paths
: 0);
2292 res_info
[BNA_RX_RES_MEM_T_HPAGE
].res_type
= BNA_RES_T_MEM
;
2293 mem_info
= &res_info
[BNA_RX_RES_MEM_T_HPAGE
].res_u
.mem_info
;
2294 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2295 mem_info
->len
= (hpage_count
? PAGE_SIZE
: 0);
2296 mem_info
->num
= (hpage_count
? (hpage_count
* q_cfg
->num_paths
) : 0);
2298 res_info
[BNA_RX_RES_MEM_T_IBIDX
].res_type
= BNA_RES_T_MEM
;
2299 mem_info
= &res_info
[BNA_RX_RES_MEM_T_IBIDX
].res_u
.mem_info
;
2300 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2301 mem_info
->len
= BFI_IBIDX_SIZE
;
2302 mem_info
->num
= q_cfg
->num_paths
;
2304 res_info
[BNA_RX_RES_MEM_T_RIT
].res_type
= BNA_RES_T_MEM
;
2305 mem_info
= &res_info
[BNA_RX_RES_MEM_T_RIT
].res_u
.mem_info
;
2306 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2307 mem_info
->len
= BFI_ENET_RSS_RIT_MAX
;
2310 res_info
[BNA_RX_RES_T_INTR
].res_type
= BNA_RES_T_INTR
;
2311 res_info
[BNA_RX_RES_T_INTR
].res_u
.intr_info
.intr_type
= BNA_INTR_T_MSIX
;
2312 res_info
[BNA_RX_RES_T_INTR
].res_u
.intr_info
.num
= q_cfg
->num_paths
;
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		const struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	u32 page_count;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *unmapq_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;
	struct bna_mem_descr *dqpt_mem;
	struct bna_mem_descr *hsqpt_mem;
	struct bna_mem_descr *dsqpt_mem;
	struct bna_mem_descr *hpage_mem;
	struct bna_mem_descr *dpage_mem;
	int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
	int dpage_count, hpage_count, rcb_idx;

	if (!bna_rx_res_check(rx_mod, rx_cfg))
		return NULL;

	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
	rx->bna = bna;
	rx->priv = priv;
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}

	rx->num_paths = rx_cfg->num_paths;
	for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = (1 << rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;

		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */

		q0->rx = rx;
		q0->rxp = rxp;

		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
		rcb_idx++;
		q0->rcb->q_depth = rx_cfg->q_depth;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
		q0->rcb->page_idx = dpage_idx;
		q0->rcb->page_count = dpage_count;
		dpage_idx += dpage_count;

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
			rcb_idx++;
			q1->rcb->q_depth = rx_cfg->q_depth;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->small_buff_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[hpage_idx]);
			q1->rcb->page_idx = hpage_idx;
			q1->rcb->page_count = hpage_count;
			hpage_idx += hpage_count;

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup CQ */

		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		rxp->cq.ccb->q_depth = rx_cfg->q_depth +
					((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
					0 : rx_cfg->q_depth);
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
		rxp->cq.ccb->page_idx = cpage_idx;
		rxp->cq.ccb->page_count = page_count;
		cpage_idx += page_count;

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= (1 << rx->rid);

	return rx;
}
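/*
 * Teardown mirrors creation: the RxF is uninitialized first, then each RXP
 * on rxp_q is drained, giving bnad a chance to destroy its RCBs and CCB
 * before the RXQs and RXPs go back to the rx_mod free lists and the RID
 * bit is cleared.
 */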
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			bna_rxq_put(rx_mod, q1);
		}

		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~(1 << rx->rid);

	bna_rx_put(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed; treat the Rx as already stopped */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLED;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
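/*
 * Promiscuous and default mode are mutually exclusive and each may be
 * owned by at most one RID in the system; the checks below reject any
 * request that would violate either rule before touching filter state.
 */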
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid)) {
				goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	} else
		(*cbfn)(rx->bna->bnad, rx);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}
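/*
 * Dynamic interrupt moderation: dim_vector[load][bias] holds the coalescing
 * timeout for each packet-rate bucket. bnad reloads the table through
 * bna_rx_dim_reconfig(), and bna_rx_dim_update() picks an entry per CCB.
 */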
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}
void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	{12, 12},
	{6, 10},
	{5, 10},
	{4, 8},
	{3, 6},
	{3, 6},
	{2, 4},
	{1, 2},
};
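/*
 * The call_tx_*_cbfn() helpers below latch the callback pointer, clear it,
 * and only then invoke it, so each completion is reported to bnad at most
 * once even if the state is re-entered.
 */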
#define call_tx_stop_cbfn(tx) \
do { \
	if ((tx)->stop_cbfn) { \
		void (*cbfn)(void *, struct bna_tx *); \
		void *cbarg; \
		cbfn = (tx)->stop_cbfn; \
		cbarg = (tx)->stop_cbarg; \
		(tx)->stop_cbfn = NULL; \
		(tx)->stop_cbarg = NULL; \
		cbfn(cbarg, (tx)); \
	} \
} while (0)

#define call_tx_prio_change_cbfn(tx) \
do { \
	if ((tx)->prio_change_cbfn) { \
		void (*cbfn)(struct bnad *, struct bna_tx *); \
		cbfn = (tx)->prio_change_cbfn; \
		(tx)->prio_change_cbfn = NULL; \
		cbfn((tx)->bna->bnad, (tx)); \
	} \
} while (0)
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START		= 1,
	TX_E_STOP		= 2,
	TX_E_FAIL		= 3,
	TX_E_STARTED		= 4,
	TX_E_STOPPED		= 5,
	TX_E_PRIO_CHANGE	= 6,
	TX_E_CLEANUP_DONE	= 7,
	TX_E_BW_UPDATE		= 8,
};

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);
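/*
 * Tx FSM: stopped -> start_wait -> started on TX_E_START/TX_E_STARTED;
 * stop_wait and cleanup_wait cover the stop path, prio_stop_wait and
 * prio_cleanup_wait handle priority/bandwidth reconfiguration, and
 * failed/quiesce_wait deal with IOC failure and recovery.
 */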
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_STARTED:
		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
				BNA_TX_F_BW_UPDATED);
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		tx->flags |= BNA_TX_F_PRIO_CHANGED;
		break;

	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STARTED:
		/**
		 * We are here due to start_wait -> stop_wait transition on
		 * TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		call_tx_prio_change_cbfn(tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	call_tx_prio_change_cbfn(tx);
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
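/*
 * Build a BFI_ENET_H2I_TX_CFG_SET_REQ describing every TxQ (queue page
 * table, priority, IB index address and MSI-X vector) plus the shared IB
 * and VLAN settings, and post it to the adapter via the message queue.
 */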
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq;
		i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Stop IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = page_mem[i].kva;

		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}
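/*
 * Regular Tx objects are taken from the head of tx_free_q and loopback
 * objects from the tail; bna_tx_free() re-inserts a freed Tx sorted by RID
 * so the free list stays ordered.
 */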
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR) {
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	} else {
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	}
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);
	tx->type = type;

	return tx;
}

static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
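/*
 * TX_CFG_SET response handler: the firmware returns per-queue doorbell
 * offsets relative to the PCI BAR; convert them to kernel addresses, record
 * the hardware queue ids, zero the producer/consumer indexes, and drive the
 * FSM with TX_E_STARTED.
 */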
void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}

void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}

void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
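/*
 * Resource sizing: one TCB per TxQ, one QPT entry and one software QPT slot
 * per queue page, one DMA page per QPT entry, and one IB index segment per
 * TxQ. As a rough illustration (numbers assumed here, not taken from this
 * driver): a txq_depth of 2048 with a 64-byte work item gives q_size =
 * 128 KiB, i.e. page_count = 32 with 4 KiB pages, so 32 DMA pages and 32
 * QPT entries per TxQ.
 */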
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = num_txq * page_count;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
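/*
 * bna_tx_create() pulls a Tx object and tx_cfg->num_txq TxQs off the free
 * lists, binds each TxQ's TCB, IB segment and queue page table to the
 * caller-provided res_info memory, assigns priorities, and parks the new
 * Tx in the stopped state with its RID recorded in rid_mask.
 */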
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int page_size;
	int page_idx = 0;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
			tx_cfg->num_txq;
	page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;

	/* Get resources */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/* Initialize */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = 0; /* Not used */
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, page_size,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				res_u.mem_info.mdl[page_idx]);
		txq->tcb->page_idx = page_idx;
		txq->tcb->page_count = page_count;
		page_idx += page_count;

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= (1 << tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
	bna_tx_free(tx);
}

void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}

void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}
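/*
 * The Tx module owns the Tx and TxQ arrays sized by ioceth.attr.num_txq;
 * init threads every element onto the free lists, and the mod start/stop/
 * fail hooks below fan the corresponding event out to every active Tx of
 * the requested type.
 */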
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}

void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}

void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}

void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}