2 * Linux network driver for Brocade Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
23 bna_ib_coalescing_timeo_set(struct bna_ib
*ib
, u8 coalescing_timeo
)
25 ib
->coalescing_timeo
= coalescing_timeo
;
26 ib
->door_bell
.doorbell_ack
= BNA_DOORBELL_IB_INT_ACK(
27 (u32
)ib
->coalescing_timeo
, 0);
32 #define bna_rxf_vlan_cfg_soft_reset(rxf) \
34 (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
35 (rxf)->vlan_strip_pending = true; \
38 #define bna_rxf_rss_cfg_soft_reset(rxf) \
40 if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
41 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
42 BNA_RSS_F_CFG_PENDING | \
43 BNA_RSS_F_STATUS_PENDING); \
46 static int bna_rxf_cfg_apply(struct bna_rxf
*rxf
);
47 static void bna_rxf_cfg_reset(struct bna_rxf
*rxf
);
48 static int bna_rxf_fltr_clear(struct bna_rxf
*rxf
);
49 static int bna_rxf_ucast_cfg_apply(struct bna_rxf
*rxf
);
50 static int bna_rxf_promisc_cfg_apply(struct bna_rxf
*rxf
);
51 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf
*rxf
);
52 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf
*rxf
);
53 static int bna_rxf_ucast_cfg_reset(struct bna_rxf
*rxf
,
54 enum bna_cleanup_type cleanup
);
55 static int bna_rxf_promisc_cfg_reset(struct bna_rxf
*rxf
,
56 enum bna_cleanup_type cleanup
);
57 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf
*rxf
,
58 enum bna_cleanup_type cleanup
);
60 bfa_fsm_state_decl(bna_rxf
, stopped
, struct bna_rxf
,
62 bfa_fsm_state_decl(bna_rxf
, paused
, struct bna_rxf
,
64 bfa_fsm_state_decl(bna_rxf
, cfg_wait
, struct bna_rxf
,
66 bfa_fsm_state_decl(bna_rxf
, started
, struct bna_rxf
,
68 bfa_fsm_state_decl(bna_rxf
, fltr_clr_wait
, struct bna_rxf
,
70 bfa_fsm_state_decl(bna_rxf
, last_resp_wait
, struct bna_rxf
,
74 bna_rxf_sm_stopped_entry(struct bna_rxf
*rxf
)
76 call_rxf_stop_cbfn(rxf
);
80 bna_rxf_sm_stopped(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
84 if (rxf
->flags
& BNA_RXF_F_PAUSED
) {
85 bfa_fsm_set_state(rxf
, bna_rxf_sm_paused
);
86 call_rxf_start_cbfn(rxf
);
88 bfa_fsm_set_state(rxf
, bna_rxf_sm_cfg_wait
);
92 call_rxf_stop_cbfn(rxf
);
100 call_rxf_cam_fltr_cbfn(rxf
);
104 rxf
->flags
|= BNA_RXF_F_PAUSED
;
105 call_rxf_pause_cbfn(rxf
);
109 rxf
->flags
&= ~BNA_RXF_F_PAUSED
;
110 call_rxf_resume_cbfn(rxf
);
119 bna_rxf_sm_paused_entry(struct bna_rxf
*rxf
)
121 call_rxf_pause_cbfn(rxf
);
125 bna_rxf_sm_paused(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
130 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
134 call_rxf_cam_fltr_cbfn(rxf
);
138 rxf
->flags
&= ~BNA_RXF_F_PAUSED
;
139 bfa_fsm_set_state(rxf
, bna_rxf_sm_cfg_wait
);
148 bna_rxf_sm_cfg_wait_entry(struct bna_rxf
*rxf
)
150 if (!bna_rxf_cfg_apply(rxf
)) {
151 /* No more pending config updates */
152 bfa_fsm_set_state(rxf
, bna_rxf_sm_started
);
157 bna_rxf_sm_cfg_wait(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
161 bfa_fsm_set_state(rxf
, bna_rxf_sm_last_resp_wait
);
165 bna_rxf_cfg_reset(rxf
);
166 call_rxf_start_cbfn(rxf
);
167 call_rxf_cam_fltr_cbfn(rxf
);
168 call_rxf_resume_cbfn(rxf
);
169 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
177 rxf
->flags
|= BNA_RXF_F_PAUSED
;
178 call_rxf_start_cbfn(rxf
);
179 bfa_fsm_set_state(rxf
, bna_rxf_sm_fltr_clr_wait
);
183 if (!bna_rxf_cfg_apply(rxf
)) {
184 /* No more pending config updates */
185 bfa_fsm_set_state(rxf
, bna_rxf_sm_started
);
195 bna_rxf_sm_started_entry(struct bna_rxf
*rxf
)
197 call_rxf_start_cbfn(rxf
);
198 call_rxf_cam_fltr_cbfn(rxf
);
199 call_rxf_resume_cbfn(rxf
);
203 bna_rxf_sm_started(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
208 bna_rxf_cfg_reset(rxf
);
209 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
213 bfa_fsm_set_state(rxf
, bna_rxf_sm_cfg_wait
);
217 rxf
->flags
|= BNA_RXF_F_PAUSED
;
218 if (!bna_rxf_fltr_clear(rxf
))
219 bfa_fsm_set_state(rxf
, bna_rxf_sm_paused
);
221 bfa_fsm_set_state(rxf
, bna_rxf_sm_fltr_clr_wait
);
230 bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf
*rxf
)
235 bna_rxf_sm_fltr_clr_wait(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
239 bna_rxf_cfg_reset(rxf
);
240 call_rxf_pause_cbfn(rxf
);
241 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
245 if (!bna_rxf_fltr_clear(rxf
)) {
246 /* No more pending CAM entries to clear */
247 bfa_fsm_set_state(rxf
, bna_rxf_sm_paused
);
257 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf
*rxf
)
262 bna_rxf_sm_last_resp_wait(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
267 bna_rxf_cfg_reset(rxf
);
268 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
277 bna_bfi_ucast_req(struct bna_rxf
*rxf
, struct bna_mac
*mac
,
278 enum bfi_enet_h2i_msgs req_type
)
280 struct bfi_enet_ucast_req
*req
= &rxf
->bfi_enet_cmd
.ucast_req
;
282 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
, req_type
, 0, rxf
->rx
->rid
);
283 req
->mh
.num_entries
= htons(
284 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req
)));
285 memcpy(&req
->mac_addr
, &mac
->addr
, sizeof(mac_t
));
286 bfa_msgq_cmd_set(&rxf
->msgq_cmd
, NULL
, NULL
,
287 sizeof(struct bfi_enet_ucast_req
), &req
->mh
);
288 bfa_msgq_cmd_post(&rxf
->rx
->bna
->msgq
, &rxf
->msgq_cmd
);
292 bna_bfi_mcast_add_req(struct bna_rxf
*rxf
, struct bna_mac
*mac
)
294 struct bfi_enet_mcast_add_req
*req
=
295 &rxf
->bfi_enet_cmd
.mcast_add_req
;
297 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
, BFI_ENET_H2I_MAC_MCAST_ADD_REQ
,
299 req
->mh
.num_entries
= htons(
300 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req
)));
301 memcpy(&req
->mac_addr
, &mac
->addr
, sizeof(mac_t
));
302 bfa_msgq_cmd_set(&rxf
->msgq_cmd
, NULL
, NULL
,
303 sizeof(struct bfi_enet_mcast_add_req
), &req
->mh
);
304 bfa_msgq_cmd_post(&rxf
->rx
->bna
->msgq
, &rxf
->msgq_cmd
);
308 bna_bfi_mcast_del_req(struct bna_rxf
*rxf
, u16 handle
)
310 struct bfi_enet_mcast_del_req
*req
=
311 &rxf
->bfi_enet_cmd
.mcast_del_req
;
313 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
, BFI_ENET_H2I_MAC_MCAST_DEL_REQ
,
315 req
->mh
.num_entries
= htons(
316 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req
)));
317 req
->handle
= htons(handle
);
318 bfa_msgq_cmd_set(&rxf
->msgq_cmd
, NULL
, NULL
,
319 sizeof(struct bfi_enet_mcast_del_req
), &req
->mh
);
320 bfa_msgq_cmd_post(&rxf
->rx
->bna
->msgq
, &rxf
->msgq_cmd
);
324 bna_bfi_mcast_filter_req(struct bna_rxf
*rxf
, enum bna_status status
)
326 struct bfi_enet_enable_req
*req
= &rxf
->bfi_enet_cmd
.req
;
328 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
,
329 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ
, 0, rxf
->rx
->rid
);
330 req
->mh
.num_entries
= htons(
331 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req
)));
332 req
->enable
= status
;
333 bfa_msgq_cmd_set(&rxf
->msgq_cmd
, NULL
, NULL
,
334 sizeof(struct bfi_enet_enable_req
), &req
->mh
);
335 bfa_msgq_cmd_post(&rxf
->rx
->bna
->msgq
, &rxf
->msgq_cmd
);
339 bna_bfi_rx_promisc_req(struct bna_rxf
*rxf
, enum bna_status status
)
341 struct bfi_enet_enable_req
*req
= &rxf
->bfi_enet_cmd
.req
;
343 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
,
344 BFI_ENET_H2I_RX_PROMISCUOUS_REQ
, 0, rxf
->rx
->rid
);
345 req
->mh
.num_entries
= htons(
346 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req
)));
347 req
->enable
= status
;
348 bfa_msgq_cmd_set(&rxf
->msgq_cmd
, NULL
, NULL
,
349 sizeof(struct bfi_enet_enable_req
), &req
->mh
);
350 bfa_msgq_cmd_post(&rxf
->rx
->bna
->msgq
, &rxf
->msgq_cmd
);
354 bna_bfi_rx_vlan_filter_set(struct bna_rxf
*rxf
, u8 block_idx
)
356 struct bfi_enet_rx_vlan_req
*req
= &rxf
->bfi_enet_cmd
.vlan_req
;
360 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
,
361 BFI_ENET_H2I_RX_VLAN_SET_REQ
, 0, rxf
->rx
->rid
);
362 req
->mh
.num_entries
= htons(
363 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req
)));
364 req
->block_idx
= block_idx
;
365 for (i
= 0; i
< (BFI_ENET_VLAN_BLOCK_SIZE
/ 32); i
++) {
366 j
= (block_idx
* (BFI_ENET_VLAN_BLOCK_SIZE
/ 32)) + i
;
367 if (rxf
->vlan_filter_status
== BNA_STATUS_T_ENABLED
)
369 htonl(rxf
->vlan_filter_table
[j
]);
371 req
->bit_mask
[i
] = 0xFFFFFFFF;
373 bfa_msgq_cmd_set(&rxf
->msgq_cmd
, NULL
, NULL
,
374 sizeof(struct bfi_enet_rx_vlan_req
), &req
->mh
);
375 bfa_msgq_cmd_post(&rxf
->rx
->bna
->msgq
, &rxf
->msgq_cmd
);
379 bna_bfi_vlan_strip_enable(struct bna_rxf
*rxf
)
381 struct bfi_enet_enable_req
*req
= &rxf
->bfi_enet_cmd
.req
;
383 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
,
384 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ
, 0, rxf
->rx
->rid
);
385 req
->mh
.num_entries
= htons(
386 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req
)));
387 req
->enable
= rxf
->vlan_strip_status
;
388 bfa_msgq_cmd_set(&rxf
->msgq_cmd
, NULL
, NULL
,
389 sizeof(struct bfi_enet_enable_req
), &req
->mh
);
390 bfa_msgq_cmd_post(&rxf
->rx
->bna
->msgq
, &rxf
->msgq_cmd
);
394 bna_bfi_rit_cfg(struct bna_rxf
*rxf
)
396 struct bfi_enet_rit_req
*req
= &rxf
->bfi_enet_cmd
.rit_req
;
398 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
,
399 BFI_ENET_H2I_RIT_CFG_REQ
, 0, rxf
->rx
->rid
);
400 req
->mh
.num_entries
= htons(
401 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req
)));
402 req
->size
= htons(rxf
->rit_size
);
403 memcpy(&req
->table
[0], rxf
->rit
, rxf
->rit_size
);
404 bfa_msgq_cmd_set(&rxf
->msgq_cmd
, NULL
, NULL
,
405 sizeof(struct bfi_enet_rit_req
), &req
->mh
);
406 bfa_msgq_cmd_post(&rxf
->rx
->bna
->msgq
, &rxf
->msgq_cmd
);
410 bna_bfi_rss_cfg(struct bna_rxf
*rxf
)
412 struct bfi_enet_rss_cfg_req
*req
= &rxf
->bfi_enet_cmd
.rss_req
;
415 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
,
416 BFI_ENET_H2I_RSS_CFG_REQ
, 0, rxf
->rx
->rid
);
417 req
->mh
.num_entries
= htons(
418 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req
)));
419 req
->cfg
.type
= rxf
->rss_cfg
.hash_type
;
420 req
->cfg
.mask
= rxf
->rss_cfg
.hash_mask
;
421 for (i
= 0; i
< BFI_ENET_RSS_KEY_LEN
; i
++)
423 htonl(rxf
->rss_cfg
.toeplitz_hash_key
[i
]);
424 bfa_msgq_cmd_set(&rxf
->msgq_cmd
, NULL
, NULL
,
425 sizeof(struct bfi_enet_rss_cfg_req
), &req
->mh
);
426 bfa_msgq_cmd_post(&rxf
->rx
->bna
->msgq
, &rxf
->msgq_cmd
);
430 bna_bfi_rss_enable(struct bna_rxf
*rxf
)
432 struct bfi_enet_enable_req
*req
= &rxf
->bfi_enet_cmd
.req
;
434 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
,
435 BFI_ENET_H2I_RSS_ENABLE_REQ
, 0, rxf
->rx
->rid
);
436 req
->mh
.num_entries
= htons(
437 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req
)));
438 req
->enable
= rxf
->rss_status
;
439 bfa_msgq_cmd_set(&rxf
->msgq_cmd
, NULL
, NULL
,
440 sizeof(struct bfi_enet_enable_req
), &req
->mh
);
441 bfa_msgq_cmd_post(&rxf
->rx
->bna
->msgq
, &rxf
->msgq_cmd
);
444 /* This function gets the multicast MAC that has already been added to CAM */
445 static struct bna_mac
*
446 bna_rxf_mcmac_get(struct bna_rxf
*rxf
, u8
*mac_addr
)
449 struct list_head
*qe
;
451 list_for_each(qe
, &rxf
->mcast_active_q
) {
452 mac
= (struct bna_mac
*)qe
;
453 if (BNA_MAC_IS_EQUAL(&mac
->addr
, mac_addr
))
457 list_for_each(qe
, &rxf
->mcast_pending_del_q
) {
458 mac
= (struct bna_mac
*)qe
;
459 if (BNA_MAC_IS_EQUAL(&mac
->addr
, mac_addr
))
466 static struct bna_mcam_handle
*
467 bna_rxf_mchandle_get(struct bna_rxf
*rxf
, int handle
)
469 struct bna_mcam_handle
*mchandle
;
470 struct list_head
*qe
;
472 list_for_each(qe
, &rxf
->mcast_handle_q
) {
473 mchandle
= (struct bna_mcam_handle
*)qe
;
474 if (mchandle
->handle
== handle
)
482 bna_rxf_mchandle_attach(struct bna_rxf
*rxf
, u8
*mac_addr
, int handle
)
484 struct bna_mac
*mcmac
;
485 struct bna_mcam_handle
*mchandle
;
487 mcmac
= bna_rxf_mcmac_get(rxf
, mac_addr
);
488 mchandle
= bna_rxf_mchandle_get(rxf
, handle
);
489 if (mchandle
== NULL
) {
490 mchandle
= bna_mcam_mod_handle_get(&rxf
->rx
->bna
->mcam_mod
);
491 mchandle
->handle
= handle
;
492 mchandle
->refcnt
= 0;
493 list_add_tail(&mchandle
->qe
, &rxf
->mcast_handle_q
);
496 mcmac
->handle
= mchandle
;
500 bna_rxf_mcast_del(struct bna_rxf
*rxf
, struct bna_mac
*mac
,
501 enum bna_cleanup_type cleanup
)
503 struct bna_mcam_handle
*mchandle
;
506 mchandle
= mac
->handle
;
507 if (mchandle
== NULL
)
511 if (mchandle
->refcnt
== 0) {
512 if (cleanup
== BNA_HARD_CLEANUP
) {
513 bna_bfi_mcast_del_req(rxf
, mchandle
->handle
);
516 list_del(&mchandle
->qe
);
517 bfa_q_qe_init(&mchandle
->qe
);
518 bna_mcam_mod_handle_put(&rxf
->rx
->bna
->mcam_mod
, mchandle
);
526 bna_rxf_mcast_cfg_apply(struct bna_rxf
*rxf
)
528 struct bna_mac
*mac
= NULL
;
529 struct list_head
*qe
;
532 /* Delete multicast entries previousely added */
533 while (!list_empty(&rxf
->mcast_pending_del_q
)) {
534 bfa_q_deq(&rxf
->mcast_pending_del_q
, &qe
);
536 mac
= (struct bna_mac
*)qe
;
537 ret
= bna_rxf_mcast_del(rxf
, mac
, BNA_HARD_CLEANUP
);
538 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
543 /* Add multicast entries */
544 if (!list_empty(&rxf
->mcast_pending_add_q
)) {
545 bfa_q_deq(&rxf
->mcast_pending_add_q
, &qe
);
547 mac
= (struct bna_mac
*)qe
;
548 list_add_tail(&mac
->qe
, &rxf
->mcast_active_q
);
549 bna_bfi_mcast_add_req(rxf
, mac
);
557 bna_rxf_vlan_cfg_apply(struct bna_rxf
*rxf
)
559 u8 vlan_pending_bitmask
;
562 if (rxf
->vlan_pending_bitmask
) {
563 vlan_pending_bitmask
= rxf
->vlan_pending_bitmask
;
564 while (!(vlan_pending_bitmask
& 0x1)) {
566 vlan_pending_bitmask
>>= 1;
568 rxf
->vlan_pending_bitmask
&= ~(1 << block_idx
);
569 bna_bfi_rx_vlan_filter_set(rxf
, block_idx
);
577 bna_rxf_mcast_cfg_reset(struct bna_rxf
*rxf
, enum bna_cleanup_type cleanup
)
579 struct list_head
*qe
;
583 /* Throw away delete pending mcast entries */
584 while (!list_empty(&rxf
->mcast_pending_del_q
)) {
585 bfa_q_deq(&rxf
->mcast_pending_del_q
, &qe
);
587 mac
= (struct bna_mac
*)qe
;
588 ret
= bna_rxf_mcast_del(rxf
, mac
, cleanup
);
589 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
594 /* Move active mcast entries to pending_add_q */
595 while (!list_empty(&rxf
->mcast_active_q
)) {
596 bfa_q_deq(&rxf
->mcast_active_q
, &qe
);
598 list_add_tail(qe
, &rxf
->mcast_pending_add_q
);
599 mac
= (struct bna_mac
*)qe
;
600 if (bna_rxf_mcast_del(rxf
, mac
, cleanup
))
608 bna_rxf_rss_cfg_apply(struct bna_rxf
*rxf
)
610 if (rxf
->rss_pending
) {
611 if (rxf
->rss_pending
& BNA_RSS_F_RIT_PENDING
) {
612 rxf
->rss_pending
&= ~BNA_RSS_F_RIT_PENDING
;
613 bna_bfi_rit_cfg(rxf
);
617 if (rxf
->rss_pending
& BNA_RSS_F_CFG_PENDING
) {
618 rxf
->rss_pending
&= ~BNA_RSS_F_CFG_PENDING
;
619 bna_bfi_rss_cfg(rxf
);
623 if (rxf
->rss_pending
& BNA_RSS_F_STATUS_PENDING
) {
624 rxf
->rss_pending
&= ~BNA_RSS_F_STATUS_PENDING
;
625 bna_bfi_rss_enable(rxf
);
634 bna_rxf_cfg_apply(struct bna_rxf
*rxf
)
636 if (bna_rxf_ucast_cfg_apply(rxf
))
639 if (bna_rxf_mcast_cfg_apply(rxf
))
642 if (bna_rxf_promisc_cfg_apply(rxf
))
645 if (bna_rxf_allmulti_cfg_apply(rxf
))
648 if (bna_rxf_vlan_cfg_apply(rxf
))
651 if (bna_rxf_vlan_strip_cfg_apply(rxf
))
654 if (bna_rxf_rss_cfg_apply(rxf
))
660 /* Only software reset */
662 bna_rxf_fltr_clear(struct bna_rxf
*rxf
)
664 if (bna_rxf_ucast_cfg_reset(rxf
, BNA_HARD_CLEANUP
))
667 if (bna_rxf_mcast_cfg_reset(rxf
, BNA_HARD_CLEANUP
))
670 if (bna_rxf_promisc_cfg_reset(rxf
, BNA_HARD_CLEANUP
))
673 if (bna_rxf_allmulti_cfg_reset(rxf
, BNA_HARD_CLEANUP
))
680 bna_rxf_cfg_reset(struct bna_rxf
*rxf
)
682 bna_rxf_ucast_cfg_reset(rxf
, BNA_SOFT_CLEANUP
);
683 bna_rxf_mcast_cfg_reset(rxf
, BNA_SOFT_CLEANUP
);
684 bna_rxf_promisc_cfg_reset(rxf
, BNA_SOFT_CLEANUP
);
685 bna_rxf_allmulti_cfg_reset(rxf
, BNA_SOFT_CLEANUP
);
686 bna_rxf_vlan_cfg_soft_reset(rxf
);
687 bna_rxf_rss_cfg_soft_reset(rxf
);
691 bna_rit_init(struct bna_rxf
*rxf
, int rit_size
)
693 struct bna_rx
*rx
= rxf
->rx
;
695 struct list_head
*qe
;
698 rxf
->rit_size
= rit_size
;
699 list_for_each(qe
, &rx
->rxp_q
) {
700 rxp
= (struct bna_rxp
*)qe
;
701 rxf
->rit
[offset
] = rxp
->cq
.ccb
->id
;
708 bna_bfi_rxf_cfg_rsp(struct bna_rxf
*rxf
, struct bfi_msgq_mhdr
*msghdr
)
710 bfa_fsm_send_event(rxf
, RXF_E_FW_RESP
);
714 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf
*rxf
,
715 struct bfi_msgq_mhdr
*msghdr
)
717 struct bfi_enet_mcast_add_req
*req
=
718 &rxf
->bfi_enet_cmd
.mcast_add_req
;
719 struct bfi_enet_mcast_add_rsp
*rsp
=
720 (struct bfi_enet_mcast_add_rsp
*)msghdr
;
722 bna_rxf_mchandle_attach(rxf
, (u8
*)&req
->mac_addr
,
724 bfa_fsm_send_event(rxf
, RXF_E_FW_RESP
);
728 bna_rxf_init(struct bna_rxf
*rxf
,
730 struct bna_rx_config
*q_config
,
731 struct bna_res_info
*res_info
)
735 INIT_LIST_HEAD(&rxf
->ucast_pending_add_q
);
736 INIT_LIST_HEAD(&rxf
->ucast_pending_del_q
);
737 rxf
->ucast_pending_set
= 0;
738 rxf
->ucast_active_set
= 0;
739 INIT_LIST_HEAD(&rxf
->ucast_active_q
);
740 rxf
->ucast_pending_mac
= NULL
;
742 INIT_LIST_HEAD(&rxf
->mcast_pending_add_q
);
743 INIT_LIST_HEAD(&rxf
->mcast_pending_del_q
);
744 INIT_LIST_HEAD(&rxf
->mcast_active_q
);
745 INIT_LIST_HEAD(&rxf
->mcast_handle_q
);
747 if (q_config
->paused
)
748 rxf
->flags
|= BNA_RXF_F_PAUSED
;
751 res_info
[BNA_RX_RES_MEM_T_RIT
].res_u
.mem_info
.mdl
[0].kva
;
752 bna_rit_init(rxf
, q_config
->num_paths
);
754 rxf
->rss_status
= q_config
->rss_status
;
755 if (rxf
->rss_status
== BNA_STATUS_T_ENABLED
) {
756 rxf
->rss_cfg
= q_config
->rss_config
;
757 rxf
->rss_pending
|= BNA_RSS_F_CFG_PENDING
;
758 rxf
->rss_pending
|= BNA_RSS_F_RIT_PENDING
;
759 rxf
->rss_pending
|= BNA_RSS_F_STATUS_PENDING
;
762 rxf
->vlan_filter_status
= BNA_STATUS_T_DISABLED
;
763 memset(rxf
->vlan_filter_table
, 0,
764 (sizeof(u32
) * (BFI_ENET_VLAN_ID_MAX
/ 32)));
765 rxf
->vlan_filter_table
[0] |= 1; /* for pure priority tagged frames */
766 rxf
->vlan_pending_bitmask
= (u8
)BFI_VLAN_BMASK_ALL
;
768 rxf
->vlan_strip_status
= q_config
->vlan_strip_status
;
770 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
774 bna_rxf_uninit(struct bna_rxf
*rxf
)
778 rxf
->ucast_pending_set
= 0;
779 rxf
->ucast_active_set
= 0;
781 while (!list_empty(&rxf
->ucast_pending_add_q
)) {
782 bfa_q_deq(&rxf
->ucast_pending_add_q
, &mac
);
783 bfa_q_qe_init(&mac
->qe
);
784 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
787 if (rxf
->ucast_pending_mac
) {
788 bfa_q_qe_init(&rxf
->ucast_pending_mac
->qe
);
789 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
,
790 rxf
->ucast_pending_mac
);
791 rxf
->ucast_pending_mac
= NULL
;
794 while (!list_empty(&rxf
->mcast_pending_add_q
)) {
795 bfa_q_deq(&rxf
->mcast_pending_add_q
, &mac
);
796 bfa_q_qe_init(&mac
->qe
);
797 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
800 rxf
->rxmode_pending
= 0;
801 rxf
->rxmode_pending_bitmask
= 0;
802 if (rxf
->rx
->bna
->promisc_rid
== rxf
->rx
->rid
)
803 rxf
->rx
->bna
->promisc_rid
= BFI_INVALID_RID
;
804 if (rxf
->rx
->bna
->default_mode_rid
== rxf
->rx
->rid
)
805 rxf
->rx
->bna
->default_mode_rid
= BFI_INVALID_RID
;
807 rxf
->rss_pending
= 0;
808 rxf
->vlan_strip_pending
= false;
816 bna_rx_cb_rxf_started(struct bna_rx
*rx
)
818 bfa_fsm_send_event(rx
, RX_E_RXF_STARTED
);
822 bna_rxf_start(struct bna_rxf
*rxf
)
824 rxf
->start_cbfn
= bna_rx_cb_rxf_started
;
825 rxf
->start_cbarg
= rxf
->rx
;
826 bfa_fsm_send_event(rxf
, RXF_E_START
);
830 bna_rx_cb_rxf_stopped(struct bna_rx
*rx
)
832 bfa_fsm_send_event(rx
, RX_E_RXF_STOPPED
);
836 bna_rxf_stop(struct bna_rxf
*rxf
)
838 rxf
->stop_cbfn
= bna_rx_cb_rxf_stopped
;
839 rxf
->stop_cbarg
= rxf
->rx
;
840 bfa_fsm_send_event(rxf
, RXF_E_STOP
);
844 bna_rxf_fail(struct bna_rxf
*rxf
)
846 bfa_fsm_send_event(rxf
, RXF_E_FAIL
);
850 bna_rx_ucast_set(struct bna_rx
*rx
, u8
*ucmac
,
851 void (*cbfn
)(struct bnad
*, struct bna_rx
*))
853 struct bna_rxf
*rxf
= &rx
->rxf
;
855 if (rxf
->ucast_pending_mac
== NULL
) {
856 rxf
->ucast_pending_mac
=
857 bna_ucam_mod_mac_get(&rxf
->rx
->bna
->ucam_mod
);
858 if (rxf
->ucast_pending_mac
== NULL
)
859 return BNA_CB_UCAST_CAM_FULL
;
860 bfa_q_qe_init(&rxf
->ucast_pending_mac
->qe
);
863 memcpy(rxf
->ucast_pending_mac
->addr
, ucmac
, ETH_ALEN
);
864 rxf
->ucast_pending_set
= 1;
865 rxf
->cam_fltr_cbfn
= cbfn
;
866 rxf
->cam_fltr_cbarg
= rx
->bna
->bnad
;
868 bfa_fsm_send_event(rxf
, RXF_E_CONFIG
);
870 return BNA_CB_SUCCESS
;
874 bna_rx_mcast_add(struct bna_rx
*rx
, u8
*addr
,
875 void (*cbfn
)(struct bnad
*, struct bna_rx
*))
877 struct bna_rxf
*rxf
= &rx
->rxf
;
880 /* Check if already added or pending addition */
881 if (bna_mac_find(&rxf
->mcast_active_q
, addr
) ||
882 bna_mac_find(&rxf
->mcast_pending_add_q
, addr
)) {
884 cbfn(rx
->bna
->bnad
, rx
);
885 return BNA_CB_SUCCESS
;
888 mac
= bna_mcam_mod_mac_get(&rxf
->rx
->bna
->mcam_mod
);
890 return BNA_CB_MCAST_LIST_FULL
;
891 bfa_q_qe_init(&mac
->qe
);
892 memcpy(mac
->addr
, addr
, ETH_ALEN
);
893 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_add_q
);
895 rxf
->cam_fltr_cbfn
= cbfn
;
896 rxf
->cam_fltr_cbarg
= rx
->bna
->bnad
;
898 bfa_fsm_send_event(rxf
, RXF_E_CONFIG
);
900 return BNA_CB_SUCCESS
;
904 bna_rx_mcast_listset(struct bna_rx
*rx
, int count
, u8
*mclist
,
905 void (*cbfn
)(struct bnad
*, struct bna_rx
*))
907 struct bna_rxf
*rxf
= &rx
->rxf
;
908 struct list_head list_head
;
909 struct list_head
*qe
;
915 INIT_LIST_HEAD(&list_head
);
916 for (i
= 0, mcaddr
= mclist
; i
< count
; i
++) {
917 mac
= bna_mcam_mod_mac_get(&rxf
->rx
->bna
->mcam_mod
);
920 bfa_q_qe_init(&mac
->qe
);
921 memcpy(mac
->addr
, mcaddr
, ETH_ALEN
);
922 list_add_tail(&mac
->qe
, &list_head
);
927 /* Purge the pending_add_q */
928 while (!list_empty(&rxf
->mcast_pending_add_q
)) {
929 bfa_q_deq(&rxf
->mcast_pending_add_q
, &qe
);
931 mac
= (struct bna_mac
*)qe
;
932 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
935 /* Schedule active_q entries for deletion */
936 while (!list_empty(&rxf
->mcast_active_q
)) {
937 bfa_q_deq(&rxf
->mcast_active_q
, &qe
);
938 mac
= (struct bna_mac
*)qe
;
939 bfa_q_qe_init(&mac
->qe
);
940 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_del_q
);
943 /* Add the new entries */
944 while (!list_empty(&list_head
)) {
945 bfa_q_deq(&list_head
, &qe
);
946 mac
= (struct bna_mac
*)qe
;
947 bfa_q_qe_init(&mac
->qe
);
948 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_add_q
);
951 rxf
->cam_fltr_cbfn
= cbfn
;
952 rxf
->cam_fltr_cbarg
= rx
->bna
->bnad
;
953 bfa_fsm_send_event(rxf
, RXF_E_CONFIG
);
955 return BNA_CB_SUCCESS
;
958 while (!list_empty(&list_head
)) {
959 bfa_q_deq(&list_head
, &qe
);
960 mac
= (struct bna_mac
*)qe
;
961 bfa_q_qe_init(&mac
->qe
);
962 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
965 return BNA_CB_MCAST_LIST_FULL
;
969 bna_rx_vlan_add(struct bna_rx
*rx
, int vlan_id
)
971 struct bna_rxf
*rxf
= &rx
->rxf
;
972 int index
= (vlan_id
>> BFI_VLAN_WORD_SHIFT
);
973 int bit
= (1 << (vlan_id
& BFI_VLAN_WORD_MASK
));
974 int group_id
= (vlan_id
>> BFI_VLAN_BLOCK_SHIFT
);
976 rxf
->vlan_filter_table
[index
] |= bit
;
977 if (rxf
->vlan_filter_status
== BNA_STATUS_T_ENABLED
) {
978 rxf
->vlan_pending_bitmask
|= (1 << group_id
);
979 bfa_fsm_send_event(rxf
, RXF_E_CONFIG
);
984 bna_rx_vlan_del(struct bna_rx
*rx
, int vlan_id
)
986 struct bna_rxf
*rxf
= &rx
->rxf
;
987 int index
= (vlan_id
>> BFI_VLAN_WORD_SHIFT
);
988 int bit
= (1 << (vlan_id
& BFI_VLAN_WORD_MASK
));
989 int group_id
= (vlan_id
>> BFI_VLAN_BLOCK_SHIFT
);
991 rxf
->vlan_filter_table
[index
] &= ~bit
;
992 if (rxf
->vlan_filter_status
== BNA_STATUS_T_ENABLED
) {
993 rxf
->vlan_pending_bitmask
|= (1 << group_id
);
994 bfa_fsm_send_event(rxf
, RXF_E_CONFIG
);
999 bna_rxf_ucast_cfg_apply(struct bna_rxf
*rxf
)
1001 struct bna_mac
*mac
= NULL
;
1002 struct list_head
*qe
;
1004 /* Delete MAC addresses previousely added */
1005 if (!list_empty(&rxf
->ucast_pending_del_q
)) {
1006 bfa_q_deq(&rxf
->ucast_pending_del_q
, &qe
);
1008 mac
= (struct bna_mac
*)qe
;
1009 bna_bfi_ucast_req(rxf
, mac
, BFI_ENET_H2I_MAC_UCAST_DEL_REQ
);
1010 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
1014 /* Set default unicast MAC */
1015 if (rxf
->ucast_pending_set
) {
1016 rxf
->ucast_pending_set
= 0;
1017 memcpy(rxf
->ucast_active_mac
.addr
,
1018 rxf
->ucast_pending_mac
->addr
, ETH_ALEN
);
1019 rxf
->ucast_active_set
= 1;
1020 bna_bfi_ucast_req(rxf
, &rxf
->ucast_active_mac
,
1021 BFI_ENET_H2I_MAC_UCAST_SET_REQ
);
1025 /* Add additional MAC entries */
1026 if (!list_empty(&rxf
->ucast_pending_add_q
)) {
1027 bfa_q_deq(&rxf
->ucast_pending_add_q
, &qe
);
1029 mac
= (struct bna_mac
*)qe
;
1030 list_add_tail(&mac
->qe
, &rxf
->ucast_active_q
);
1031 bna_bfi_ucast_req(rxf
, mac
, BFI_ENET_H2I_MAC_UCAST_ADD_REQ
);
1039 bna_rxf_ucast_cfg_reset(struct bna_rxf
*rxf
, enum bna_cleanup_type cleanup
)
1041 struct list_head
*qe
;
1042 struct bna_mac
*mac
;
1044 /* Throw away delete pending ucast entries */
1045 while (!list_empty(&rxf
->ucast_pending_del_q
)) {
1046 bfa_q_deq(&rxf
->ucast_pending_del_q
, &qe
);
1048 mac
= (struct bna_mac
*)qe
;
1049 if (cleanup
== BNA_SOFT_CLEANUP
)
1050 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
1052 bna_bfi_ucast_req(rxf
, mac
,
1053 BFI_ENET_H2I_MAC_UCAST_DEL_REQ
);
1054 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
1059 /* Move active ucast entries to pending_add_q */
1060 while (!list_empty(&rxf
->ucast_active_q
)) {
1061 bfa_q_deq(&rxf
->ucast_active_q
, &qe
);
1063 list_add_tail(qe
, &rxf
->ucast_pending_add_q
);
1064 if (cleanup
== BNA_HARD_CLEANUP
) {
1065 mac
= (struct bna_mac
*)qe
;
1066 bna_bfi_ucast_req(rxf
, mac
,
1067 BFI_ENET_H2I_MAC_UCAST_DEL_REQ
);
1072 if (rxf
->ucast_active_set
) {
1073 rxf
->ucast_pending_set
= 1;
1074 rxf
->ucast_active_set
= 0;
1075 if (cleanup
== BNA_HARD_CLEANUP
) {
1076 bna_bfi_ucast_req(rxf
, &rxf
->ucast_active_mac
,
1077 BFI_ENET_H2I_MAC_UCAST_CLR_REQ
);
1086 bna_rxf_promisc_cfg_apply(struct bna_rxf
*rxf
)
1088 struct bna
*bna
= rxf
->rx
->bna
;
1090 /* Enable/disable promiscuous mode */
1091 if (is_promisc_enable(rxf
->rxmode_pending
,
1092 rxf
->rxmode_pending_bitmask
)) {
1093 /* move promisc configuration from pending -> active */
1094 promisc_inactive(rxf
->rxmode_pending
,
1095 rxf
->rxmode_pending_bitmask
);
1096 rxf
->rxmode_active
|= BNA_RXMODE_PROMISC
;
1097 bna_bfi_rx_promisc_req(rxf
, BNA_STATUS_T_ENABLED
);
1099 } else if (is_promisc_disable(rxf
->rxmode_pending
,
1100 rxf
->rxmode_pending_bitmask
)) {
1101 /* move promisc configuration from pending -> active */
1102 promisc_inactive(rxf
->rxmode_pending
,
1103 rxf
->rxmode_pending_bitmask
);
1104 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
1105 bna
->promisc_rid
= BFI_INVALID_RID
;
1106 bna_bfi_rx_promisc_req(rxf
, BNA_STATUS_T_DISABLED
);
1114 bna_rxf_promisc_cfg_reset(struct bna_rxf
*rxf
, enum bna_cleanup_type cleanup
)
1116 struct bna
*bna
= rxf
->rx
->bna
;
1118 /* Clear pending promisc mode disable */
1119 if (is_promisc_disable(rxf
->rxmode_pending
,
1120 rxf
->rxmode_pending_bitmask
)) {
1121 promisc_inactive(rxf
->rxmode_pending
,
1122 rxf
->rxmode_pending_bitmask
);
1123 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
1124 bna
->promisc_rid
= BFI_INVALID_RID
;
1125 if (cleanup
== BNA_HARD_CLEANUP
) {
1126 bna_bfi_rx_promisc_req(rxf
, BNA_STATUS_T_DISABLED
);
1131 /* Move promisc mode config from active -> pending */
1132 if (rxf
->rxmode_active
& BNA_RXMODE_PROMISC
) {
1133 promisc_enable(rxf
->rxmode_pending
,
1134 rxf
->rxmode_pending_bitmask
);
1135 rxf
->rxmode_active
&= ~BNA_RXMODE_PROMISC
;
1136 if (cleanup
== BNA_HARD_CLEANUP
) {
1137 bna_bfi_rx_promisc_req(rxf
, BNA_STATUS_T_DISABLED
);
1146 bna_rxf_allmulti_cfg_apply(struct bna_rxf
*rxf
)
1148 /* Enable/disable allmulti mode */
1149 if (is_allmulti_enable(rxf
->rxmode_pending
,
1150 rxf
->rxmode_pending_bitmask
)) {
1151 /* move allmulti configuration from pending -> active */
1152 allmulti_inactive(rxf
->rxmode_pending
,
1153 rxf
->rxmode_pending_bitmask
);
1154 rxf
->rxmode_active
|= BNA_RXMODE_ALLMULTI
;
1155 bna_bfi_mcast_filter_req(rxf
, BNA_STATUS_T_DISABLED
);
1157 } else if (is_allmulti_disable(rxf
->rxmode_pending
,
1158 rxf
->rxmode_pending_bitmask
)) {
1159 /* move allmulti configuration from pending -> active */
1160 allmulti_inactive(rxf
->rxmode_pending
,
1161 rxf
->rxmode_pending_bitmask
);
1162 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
1163 bna_bfi_mcast_filter_req(rxf
, BNA_STATUS_T_ENABLED
);
1171 bna_rxf_allmulti_cfg_reset(struct bna_rxf
*rxf
, enum bna_cleanup_type cleanup
)
1173 /* Clear pending allmulti mode disable */
1174 if (is_allmulti_disable(rxf
->rxmode_pending
,
1175 rxf
->rxmode_pending_bitmask
)) {
1176 allmulti_inactive(rxf
->rxmode_pending
,
1177 rxf
->rxmode_pending_bitmask
);
1178 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
1179 if (cleanup
== BNA_HARD_CLEANUP
) {
1180 bna_bfi_mcast_filter_req(rxf
, BNA_STATUS_T_ENABLED
);
1185 /* Move allmulti mode config from active -> pending */
1186 if (rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
) {
1187 allmulti_enable(rxf
->rxmode_pending
,
1188 rxf
->rxmode_pending_bitmask
);
1189 rxf
->rxmode_active
&= ~BNA_RXMODE_ALLMULTI
;
1190 if (cleanup
== BNA_HARD_CLEANUP
) {
1191 bna_bfi_mcast_filter_req(rxf
, BNA_STATUS_T_ENABLED
);
1200 bna_rxf_promisc_enable(struct bna_rxf
*rxf
)
1202 struct bna
*bna
= rxf
->rx
->bna
;
1205 if (is_promisc_enable(rxf
->rxmode_pending
,
1206 rxf
->rxmode_pending_bitmask
) ||
1207 (rxf
->rxmode_active
& BNA_RXMODE_PROMISC
)) {
1208 /* Do nothing if pending enable or already enabled */
1209 } else if (is_promisc_disable(rxf
->rxmode_pending
,
1210 rxf
->rxmode_pending_bitmask
)) {
1211 /* Turn off pending disable command */
1212 promisc_inactive(rxf
->rxmode_pending
,
1213 rxf
->rxmode_pending_bitmask
);
1215 /* Schedule enable */
1216 promisc_enable(rxf
->rxmode_pending
,
1217 rxf
->rxmode_pending_bitmask
);
1218 bna
->promisc_rid
= rxf
->rx
->rid
;
1226 bna_rxf_promisc_disable(struct bna_rxf
*rxf
)
1228 struct bna
*bna
= rxf
->rx
->bna
;
1231 if (is_promisc_disable(rxf
->rxmode_pending
,
1232 rxf
->rxmode_pending_bitmask
) ||
1233 (!(rxf
->rxmode_active
& BNA_RXMODE_PROMISC
))) {
1234 /* Do nothing if pending disable or already disabled */
1235 } else if (is_promisc_enable(rxf
->rxmode_pending
,
1236 rxf
->rxmode_pending_bitmask
)) {
1237 /* Turn off pending enable command */
1238 promisc_inactive(rxf
->rxmode_pending
,
1239 rxf
->rxmode_pending_bitmask
);
1240 bna
->promisc_rid
= BFI_INVALID_RID
;
1241 } else if (rxf
->rxmode_active
& BNA_RXMODE_PROMISC
) {
1242 /* Schedule disable */
1243 promisc_disable(rxf
->rxmode_pending
,
1244 rxf
->rxmode_pending_bitmask
);
1252 bna_rxf_allmulti_enable(struct bna_rxf
*rxf
)
1256 if (is_allmulti_enable(rxf
->rxmode_pending
,
1257 rxf
->rxmode_pending_bitmask
) ||
1258 (rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
)) {
1259 /* Do nothing if pending enable or already enabled */
1260 } else if (is_allmulti_disable(rxf
->rxmode_pending
,
1261 rxf
->rxmode_pending_bitmask
)) {
1262 /* Turn off pending disable command */
1263 allmulti_inactive(rxf
->rxmode_pending
,
1264 rxf
->rxmode_pending_bitmask
);
1266 /* Schedule enable */
1267 allmulti_enable(rxf
->rxmode_pending
,
1268 rxf
->rxmode_pending_bitmask
);
1276 bna_rxf_allmulti_disable(struct bna_rxf
*rxf
)
1280 if (is_allmulti_disable(rxf
->rxmode_pending
,
1281 rxf
->rxmode_pending_bitmask
) ||
1282 (!(rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
))) {
1283 /* Do nothing if pending disable or already disabled */
1284 } else if (is_allmulti_enable(rxf
->rxmode_pending
,
1285 rxf
->rxmode_pending_bitmask
)) {
1286 /* Turn off pending enable command */
1287 allmulti_inactive(rxf
->rxmode_pending
,
1288 rxf
->rxmode_pending_bitmask
);
1289 } else if (rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
) {
1290 /* Schedule disable */
1291 allmulti_disable(rxf
->rxmode_pending
,
1292 rxf
->rxmode_pending_bitmask
);
1300 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf
*rxf
)
1302 if (rxf
->vlan_strip_pending
) {
1303 rxf
->vlan_strip_pending
= false;
1304 bna_bfi_vlan_strip_enable(rxf
);
1313 #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1314 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1316 #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1317 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1319 #define call_rx_stop_cbfn(rx) \
1321 if ((rx)->stop_cbfn) { \
1322 void (*cbfn)(void *, struct bna_rx *); \
1324 cbfn = (rx)->stop_cbfn; \
1325 cbarg = (rx)->stop_cbarg; \
1326 (rx)->stop_cbfn = NULL; \
1327 (rx)->stop_cbarg = NULL; \
1332 #define call_rx_stall_cbfn(rx) \
1334 if ((rx)->rx_stall_cbfn) \
1335 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
1338 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
1340 struct bna_dma_addr cur_q_addr = \
1341 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
1342 (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
1343 (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
1344 (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
1345 (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
1346 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1347 (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1350 static void bna_bfi_rx_enet_start(struct bna_rx
*rx
);
1351 static void bna_rx_enet_stop(struct bna_rx
*rx
);
1352 static void bna_rx_mod_cb_rx_stopped(void *arg
, struct bna_rx
*rx
);
1354 bfa_fsm_state_decl(bna_rx
, stopped
,
1355 struct bna_rx
, enum bna_rx_event
);
1356 bfa_fsm_state_decl(bna_rx
, start_wait
,
1357 struct bna_rx
, enum bna_rx_event
);
1358 bfa_fsm_state_decl(bna_rx
, rxf_start_wait
,
1359 struct bna_rx
, enum bna_rx_event
);
1360 bfa_fsm_state_decl(bna_rx
, started
,
1361 struct bna_rx
, enum bna_rx_event
);
1362 bfa_fsm_state_decl(bna_rx
, rxf_stop_wait
,
1363 struct bna_rx
, enum bna_rx_event
);
1364 bfa_fsm_state_decl(bna_rx
, stop_wait
,
1365 struct bna_rx
, enum bna_rx_event
);
1366 bfa_fsm_state_decl(bna_rx
, cleanup_wait
,
1367 struct bna_rx
, enum bna_rx_event
);
1368 bfa_fsm_state_decl(bna_rx
, failed
,
1369 struct bna_rx
, enum bna_rx_event
);
1370 bfa_fsm_state_decl(bna_rx
, quiesce_wait
,
1371 struct bna_rx
, enum bna_rx_event
);
1373 static void bna_rx_sm_stopped_entry(struct bna_rx
*rx
)
1375 call_rx_stop_cbfn(rx
);
1378 static void bna_rx_sm_stopped(struct bna_rx
*rx
,
1379 enum bna_rx_event event
)
1383 bfa_fsm_set_state(rx
, bna_rx_sm_start_wait
);
1387 call_rx_stop_cbfn(rx
);
1395 bfa_sm_fault(event
);
1400 static void bna_rx_sm_start_wait_entry(struct bna_rx
*rx
)
1402 bna_bfi_rx_enet_start(rx
);
1406 bna_rx_sm_stop_wait_entry(struct bna_rx
*rx
)
1411 bna_rx_sm_stop_wait(struct bna_rx
*rx
, enum bna_rx_event event
)
1416 bfa_fsm_set_state(rx
, bna_rx_sm_cleanup_wait
);
1417 rx
->rx_cleanup_cbfn(rx
->bna
->bnad
, rx
);
1421 bna_rx_enet_stop(rx
);
1425 bfa_sm_fault(event
);
1430 static void bna_rx_sm_start_wait(struct bna_rx
*rx
,
1431 enum bna_rx_event event
)
1435 bfa_fsm_set_state(rx
, bna_rx_sm_stop_wait
);
1439 bfa_fsm_set_state(rx
, bna_rx_sm_stopped
);
1443 bfa_fsm_set_state(rx
, bna_rx_sm_rxf_start_wait
);
1447 bfa_sm_fault(event
);
1452 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx
*rx
)
1454 rx
->rx_post_cbfn(rx
->bna
->bnad
, rx
);
1455 bna_rxf_start(&rx
->rxf
);
1459 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx
*rx
)
1464 bna_rx_sm_rxf_stop_wait(struct bna_rx
*rx
, enum bna_rx_event event
)
1468 bfa_fsm_set_state(rx
, bna_rx_sm_cleanup_wait
);
1469 bna_rxf_fail(&rx
->rxf
);
1470 call_rx_stall_cbfn(rx
);
1471 rx
->rx_cleanup_cbfn(rx
->bna
->bnad
, rx
);
1474 case RX_E_RXF_STARTED
:
1475 bna_rxf_stop(&rx
->rxf
);
1478 case RX_E_RXF_STOPPED
:
1479 bfa_fsm_set_state(rx
, bna_rx_sm_stop_wait
);
1480 call_rx_stall_cbfn(rx
);
1481 bna_rx_enet_stop(rx
);
1485 bfa_sm_fault(event
);
1492 bna_rx_sm_started_entry(struct bna_rx
*rx
)
1494 struct bna_rxp
*rxp
;
1495 struct list_head
*qe_rxp
;
1496 int is_regular
= (rx
->type
== BNA_RX_T_REGULAR
);
1499 list_for_each(qe_rxp
, &rx
->rxp_q
) {
1500 rxp
= (struct bna_rxp
*)qe_rxp
;
1501 bna_ib_start(rx
->bna
, &rxp
->cq
.ib
, is_regular
);
1504 bna_ethport_cb_rx_started(&rx
->bna
->ethport
);
1508 bna_rx_sm_started(struct bna_rx
*rx
, enum bna_rx_event event
)
1512 bfa_fsm_set_state(rx
, bna_rx_sm_rxf_stop_wait
);
1513 bna_ethport_cb_rx_stopped(&rx
->bna
->ethport
);
1514 bna_rxf_stop(&rx
->rxf
);
1518 bfa_fsm_set_state(rx
, bna_rx_sm_failed
);
1519 bna_ethport_cb_rx_stopped(&rx
->bna
->ethport
);
1520 bna_rxf_fail(&rx
->rxf
);
1521 call_rx_stall_cbfn(rx
);
1522 rx
->rx_cleanup_cbfn(rx
->bna
->bnad
, rx
);
1526 bfa_sm_fault(event
);
1531 static void bna_rx_sm_rxf_start_wait(struct bna_rx
*rx
,
1532 enum bna_rx_event event
)
1536 bfa_fsm_set_state(rx
, bna_rx_sm_rxf_stop_wait
);
1540 bfa_fsm_set_state(rx
, bna_rx_sm_failed
);
1541 bna_rxf_fail(&rx
->rxf
);
1542 call_rx_stall_cbfn(rx
);
1543 rx
->rx_cleanup_cbfn(rx
->bna
->bnad
, rx
);
1546 case RX_E_RXF_STARTED
:
1547 bfa_fsm_set_state(rx
, bna_rx_sm_started
);
1551 bfa_sm_fault(event
);
1557 bna_rx_sm_cleanup_wait_entry(struct bna_rx
*rx
)
1562 bna_rx_sm_cleanup_wait(struct bna_rx
*rx
, enum bna_rx_event event
)
1566 case RX_E_RXF_STOPPED
:
1570 case RX_E_CLEANUP_DONE
:
1571 bfa_fsm_set_state(rx
, bna_rx_sm_stopped
);
1575 bfa_sm_fault(event
);
1581 bna_rx_sm_failed_entry(struct bna_rx
*rx
)
1586 bna_rx_sm_failed(struct bna_rx
*rx
, enum bna_rx_event event
)
1590 bfa_fsm_set_state(rx
, bna_rx_sm_quiesce_wait
);
1594 bfa_fsm_set_state(rx
, bna_rx_sm_cleanup_wait
);
1598 case RX_E_RXF_STARTED
:
1599 case RX_E_RXF_STOPPED
:
1603 case RX_E_CLEANUP_DONE
:
1604 bfa_fsm_set_state(rx
, bna_rx_sm_stopped
);
1608 bfa_sm_fault(event
);
1613 bna_rx_sm_quiesce_wait_entry(struct bna_rx
*rx
)
1618 bna_rx_sm_quiesce_wait(struct bna_rx
*rx
, enum bna_rx_event event
)
1622 bfa_fsm_set_state(rx
, bna_rx_sm_cleanup_wait
);
1626 bfa_fsm_set_state(rx
, bna_rx_sm_failed
);
1629 case RX_E_CLEANUP_DONE
:
1630 bfa_fsm_set_state(rx
, bna_rx_sm_start_wait
);
1634 bfa_sm_fault(event
);
1640 bna_bfi_rx_enet_start(struct bna_rx
*rx
)
1642 struct bfi_enet_rx_cfg_req
*cfg_req
= &rx
->bfi_enet_cmd
.cfg_req
;
1643 struct bna_rxp
*rxp
= NULL
;
1644 struct bna_rxq
*q0
= NULL
, *q1
= NULL
;
1645 struct list_head
*rxp_qe
;
1648 bfi_msgq_mhdr_set(cfg_req
->mh
, BFI_MC_ENET
,
1649 BFI_ENET_H2I_RX_CFG_SET_REQ
, 0, rx
->rid
);
1650 cfg_req
->mh
.num_entries
= htons(
1651 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req
)));
1653 cfg_req
->num_queue_sets
= rx
->num_paths
;
1654 for (i
= 0, rxp_qe
= bfa_q_first(&rx
->rxp_q
);
1656 i
++, rxp_qe
= bfa_q_next(rxp_qe
)) {
1657 rxp
= (struct bna_rxp
*)rxp_qe
;
1659 GET_RXQS(rxp
, q0
, q1
);
1660 switch (rxp
->type
) {
1664 bfi_enet_datapath_q_init(&cfg_req
->q_cfg
[i
].qs
.q
,
1666 cfg_req
->q_cfg
[i
].qs
.rx_buffer_size
=
1667 htons((u16
)q1
->buffer_size
);
1670 case BNA_RXP_SINGLE
:
1671 /* Large/Single RxQ */
1672 bfi_enet_datapath_q_init(&cfg_req
->q_cfg
[i
].ql
.q
,
1675 bna_enet_mtu_get(&rx
->bna
->enet
);
1676 cfg_req
->q_cfg
[i
].ql
.rx_buffer_size
=
1677 htons((u16
)q0
->buffer_size
);
1684 bfi_enet_datapath_q_init(&cfg_req
->q_cfg
[i
].cq
.q
,
1687 cfg_req
->q_cfg
[i
].ib
.index_addr
.a32
.addr_lo
=
1688 rxp
->cq
.ib
.ib_seg_host_addr
.lsb
;
1689 cfg_req
->q_cfg
[i
].ib
.index_addr
.a32
.addr_hi
=
1690 rxp
->cq
.ib
.ib_seg_host_addr
.msb
;
1691 cfg_req
->q_cfg
[i
].ib
.intr
.msix_index
=
1692 htons((u16
)rxp
->cq
.ib
.intr_vector
);
1695 cfg_req
->ib_cfg
.int_pkt_dma
= BNA_STATUS_T_DISABLED
;
1696 cfg_req
->ib_cfg
.int_enabled
= BNA_STATUS_T_ENABLED
;
1697 cfg_req
->ib_cfg
.int_pkt_enabled
= BNA_STATUS_T_DISABLED
;
1698 cfg_req
->ib_cfg
.continuous_coalescing
= BNA_STATUS_T_DISABLED
;
1699 cfg_req
->ib_cfg
.msix
= (rxp
->cq
.ib
.intr_type
== BNA_INTR_T_MSIX
)
1700 ? BNA_STATUS_T_ENABLED
:
1701 BNA_STATUS_T_DISABLED
;
1702 cfg_req
->ib_cfg
.coalescing_timeout
=
1703 htonl((u32
)rxp
->cq
.ib
.coalescing_timeo
);
1704 cfg_req
->ib_cfg
.inter_pkt_timeout
=
1705 htonl((u32
)rxp
->cq
.ib
.interpkt_timeo
);
1706 cfg_req
->ib_cfg
.inter_pkt_count
= (u8
)rxp
->cq
.ib
.interpkt_count
;
1708 switch (rxp
->type
) {
1710 cfg_req
->rx_cfg
.rxq_type
= BFI_ENET_RXQ_LARGE_SMALL
;
1714 cfg_req
->rx_cfg
.rxq_type
= BFI_ENET_RXQ_HDS
;
1715 cfg_req
->rx_cfg
.hds
.type
= rx
->hds_cfg
.hdr_type
;
1716 cfg_req
->rx_cfg
.hds
.force_offset
= rx
->hds_cfg
.forced_offset
;
1717 cfg_req
->rx_cfg
.hds
.max_header_size
= rx
->hds_cfg
.forced_offset
;
1720 case BNA_RXP_SINGLE
:
1721 cfg_req
->rx_cfg
.rxq_type
= BFI_ENET_RXQ_SINGLE
;
1727 cfg_req
->rx_cfg
.strip_vlan
= rx
->rxf
.vlan_strip_status
;
1729 bfa_msgq_cmd_set(&rx
->msgq_cmd
, NULL
, NULL
,
1730 sizeof(struct bfi_enet_rx_cfg_req
), &cfg_req
->mh
);
1731 bfa_msgq_cmd_post(&rx
->bna
->msgq
, &rx
->msgq_cmd
);
1735 bna_bfi_rx_enet_stop(struct bna_rx
*rx
)
1737 struct bfi_enet_req
*req
= &rx
->bfi_enet_cmd
.req
;
1739 bfi_msgq_mhdr_set(req
->mh
, BFI_MC_ENET
,
1740 BFI_ENET_H2I_RX_CFG_CLR_REQ
, 0, rx
->rid
);
1741 req
->mh
.num_entries
= htons(
1742 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req
)));
1743 bfa_msgq_cmd_set(&rx
->msgq_cmd
, NULL
, NULL
, sizeof(struct bfi_enet_req
),
1745 bfa_msgq_cmd_post(&rx
->bna
->msgq
, &rx
->msgq_cmd
);
1749 bna_rx_enet_stop(struct bna_rx
*rx
)
1751 struct bna_rxp
*rxp
;
1752 struct list_head
*qe_rxp
;
1755 list_for_each(qe_rxp
, &rx
->rxp_q
) {
1756 rxp
= (struct bna_rxp
*)qe_rxp
;
1757 bna_ib_stop(rx
->bna
, &rxp
->cq
.ib
);
1760 bna_bfi_rx_enet_stop(rx
);
1764 bna_rx_res_check(struct bna_rx_mod
*rx_mod
, struct bna_rx_config
*rx_cfg
)
1766 if ((rx_mod
->rx_free_count
== 0) ||
1767 (rx_mod
->rxp_free_count
== 0) ||
1768 (rx_mod
->rxq_free_count
== 0))
1771 if (rx_cfg
->rxp_type
== BNA_RXP_SINGLE
) {
1772 if ((rx_mod
->rxp_free_count
< rx_cfg
->num_paths
) ||
1773 (rx_mod
->rxq_free_count
< rx_cfg
->num_paths
))
1776 if ((rx_mod
->rxp_free_count
< rx_cfg
->num_paths
) ||
1777 (rx_mod
->rxq_free_count
< (2 * rx_cfg
->num_paths
)))
1784 static struct bna_rxq
*
1785 bna_rxq_get(struct bna_rx_mod
*rx_mod
)
1787 struct bna_rxq
*rxq
= NULL
;
1788 struct list_head
*qe
= NULL
;
1790 bfa_q_deq(&rx_mod
->rxq_free_q
, &qe
);
1791 rx_mod
->rxq_free_count
--;
1792 rxq
= (struct bna_rxq
*)qe
;
1793 bfa_q_qe_init(&rxq
->qe
);
1799 bna_rxq_put(struct bna_rx_mod
*rx_mod
, struct bna_rxq
*rxq
)
1801 bfa_q_qe_init(&rxq
->qe
);
1802 list_add_tail(&rxq
->qe
, &rx_mod
->rxq_free_q
);
1803 rx_mod
->rxq_free_count
++;
1806 static struct bna_rxp
*
1807 bna_rxp_get(struct bna_rx_mod
*rx_mod
)
1809 struct list_head
*qe
= NULL
;
1810 struct bna_rxp
*rxp
= NULL
;
1812 bfa_q_deq(&rx_mod
->rxp_free_q
, &qe
);
1813 rx_mod
->rxp_free_count
--;
1814 rxp
= (struct bna_rxp
*)qe
;
1815 bfa_q_qe_init(&rxp
->qe
);
1821 bna_rxp_put(struct bna_rx_mod
*rx_mod
, struct bna_rxp
*rxp
)
1823 bfa_q_qe_init(&rxp
->qe
);
1824 list_add_tail(&rxp
->qe
, &rx_mod
->rxp_free_q
);
1825 rx_mod
->rxp_free_count
++;
1828 static struct bna_rx
*
1829 bna_rx_get(struct bna_rx_mod
*rx_mod
, enum bna_rx_type type
)
1831 struct list_head
*qe
= NULL
;
1832 struct bna_rx
*rx
= NULL
;
1834 if (type
== BNA_RX_T_REGULAR
) {
1835 bfa_q_deq(&rx_mod
->rx_free_q
, &qe
);
1837 bfa_q_deq_tail(&rx_mod
->rx_free_q
, &qe
);
1839 rx_mod
->rx_free_count
--;
1840 rx
= (struct bna_rx
*)qe
;
1841 bfa_q_qe_init(&rx
->qe
);
1842 list_add_tail(&rx
->qe
, &rx_mod
->rx_active_q
);
1849 bna_rx_put(struct bna_rx_mod
*rx_mod
, struct bna_rx
*rx
)
1851 struct list_head
*prev_qe
= NULL
;
1852 struct list_head
*qe
;
1854 bfa_q_qe_init(&rx
->qe
);
1856 list_for_each(qe
, &rx_mod
->rx_free_q
) {
1857 if (((struct bna_rx
*)qe
)->rid
< rx
->rid
)
1863 if (prev_qe
== NULL
) {
1864 /* This is the first entry */
1865 bfa_q_enq_head(&rx_mod
->rx_free_q
, &rx
->qe
);
1866 } else if (bfa_q_next(prev_qe
) == &rx_mod
->rx_free_q
) {
1867 /* This is the last entry */
1868 list_add_tail(&rx
->qe
, &rx_mod
->rx_free_q
);
1870 /* Somewhere in the middle */
1871 bfa_q_next(&rx
->qe
) = bfa_q_next(prev_qe
);
1872 bfa_q_prev(&rx
->qe
) = prev_qe
;
1873 bfa_q_next(prev_qe
) = &rx
->qe
;
1874 bfa_q_prev(bfa_q_next(&rx
->qe
)) = &rx
->qe
;
1877 rx_mod
->rx_free_count
++;
1881 bna_rxp_add_rxqs(struct bna_rxp
*rxp
, struct bna_rxq
*q0
,
1884 switch (rxp
->type
) {
1885 case BNA_RXP_SINGLE
:
1886 rxp
->rxq
.single
.only
= q0
;
1887 rxp
->rxq
.single
.reserved
= NULL
;
1890 rxp
->rxq
.slr
.large
= q0
;
1891 rxp
->rxq
.slr
.small
= q1
;
1894 rxp
->rxq
.hds
.data
= q0
;
1895 rxp
->rxq
.hds
.hdr
= q1
;
1903 bna_rxq_qpt_setup(struct bna_rxq
*rxq
,
1904 struct bna_rxp
*rxp
,
1907 struct bna_mem_descr
*qpt_mem
,
1908 struct bna_mem_descr
*swqpt_mem
,
1909 struct bna_mem_descr
*page_mem
)
1913 rxq
->qpt
.hw_qpt_ptr
.lsb
= qpt_mem
->dma
.lsb
;
1914 rxq
->qpt
.hw_qpt_ptr
.msb
= qpt_mem
->dma
.msb
;
1915 rxq
->qpt
.kv_qpt_ptr
= qpt_mem
->kva
;
1916 rxq
->qpt
.page_count
= page_count
;
1917 rxq
->qpt
.page_size
= page_size
;
1919 rxq
->rcb
->sw_qpt
= (void **) swqpt_mem
->kva
;
1921 for (i
= 0; i
< rxq
->qpt
.page_count
; i
++) {
1922 rxq
->rcb
->sw_qpt
[i
] = page_mem
[i
].kva
;
1923 ((struct bna_dma_addr
*)rxq
->qpt
.kv_qpt_ptr
)[i
].lsb
=
1924 page_mem
[i
].dma
.lsb
;
1925 ((struct bna_dma_addr
*)rxq
->qpt
.kv_qpt_ptr
)[i
].msb
=
1926 page_mem
[i
].dma
.msb
;
1931 bna_rxp_cqpt_setup(struct bna_rxp
*rxp
,
1934 struct bna_mem_descr
*qpt_mem
,
1935 struct bna_mem_descr
*swqpt_mem
,
1936 struct bna_mem_descr
*page_mem
)
1940 rxp
->cq
.qpt
.hw_qpt_ptr
.lsb
= qpt_mem
->dma
.lsb
;
1941 rxp
->cq
.qpt
.hw_qpt_ptr
.msb
= qpt_mem
->dma
.msb
;
1942 rxp
->cq
.qpt
.kv_qpt_ptr
= qpt_mem
->kva
;
1943 rxp
->cq
.qpt
.page_count
= page_count
;
1944 rxp
->cq
.qpt
.page_size
= page_size
;
1946 rxp
->cq
.ccb
->sw_qpt
= (void **) swqpt_mem
->kva
;
1948 for (i
= 0; i
< rxp
->cq
.qpt
.page_count
; i
++) {
1949 rxp
->cq
.ccb
->sw_qpt
[i
] = page_mem
[i
].kva
;
1951 ((struct bna_dma_addr
*)rxp
->cq
.qpt
.kv_qpt_ptr
)[i
].lsb
=
1952 page_mem
[i
].dma
.lsb
;
1953 ((struct bna_dma_addr
*)rxp
->cq
.qpt
.kv_qpt_ptr
)[i
].msb
=
1954 page_mem
[i
].dma
.msb
;
1959 bna_rx_mod_cb_rx_stopped(void *arg
, struct bna_rx
*rx
)
1961 struct bna_rx_mod
*rx_mod
= (struct bna_rx_mod
*)arg
;
1963 bfa_wc_down(&rx_mod
->rx_stop_wc
);
1967 bna_rx_mod_cb_rx_stopped_all(void *arg
)
1969 struct bna_rx_mod
*rx_mod
= (struct bna_rx_mod
*)arg
;
1971 if (rx_mod
->stop_cbfn
)
1972 rx_mod
->stop_cbfn(&rx_mod
->bna
->enet
);
1973 rx_mod
->stop_cbfn
= NULL
;
1977 bna_rx_start(struct bna_rx
*rx
)
1979 rx
->rx_flags
|= BNA_RX_F_ENET_STARTED
;
1980 if (rx
->rx_flags
& BNA_RX_F_ENABLED
)
1981 bfa_fsm_send_event(rx
, RX_E_START
);
1985 bna_rx_stop(struct bna_rx
*rx
)
1987 rx
->rx_flags
&= ~BNA_RX_F_ENET_STARTED
;
1988 if (rx
->fsm
== (bfa_fsm_t
) bna_rx_sm_stopped
)
1989 bna_rx_mod_cb_rx_stopped(&rx
->bna
->rx_mod
, rx
);
1991 rx
->stop_cbfn
= bna_rx_mod_cb_rx_stopped
;
1992 rx
->stop_cbarg
= &rx
->bna
->rx_mod
;
1993 bfa_fsm_send_event(rx
, RX_E_STOP
);
1998 bna_rx_fail(struct bna_rx
*rx
)
2000 /* Indicate Enet is not enabled, and failed */
2001 rx
->rx_flags
&= ~BNA_RX_F_ENET_STARTED
;
2002 bfa_fsm_send_event(rx
, RX_E_FAIL
);
2006 bna_rx_mod_start(struct bna_rx_mod
*rx_mod
, enum bna_rx_type type
)
2009 struct list_head
*qe
;
2011 rx_mod
->flags
|= BNA_RX_MOD_F_ENET_STARTED
;
2012 if (type
== BNA_RX_T_LOOPBACK
)
2013 rx_mod
->flags
|= BNA_RX_MOD_F_ENET_LOOPBACK
;
2015 list_for_each(qe
, &rx_mod
->rx_active_q
) {
2016 rx
= (struct bna_rx
*)qe
;
2017 if (rx
->type
== type
)
2023 bna_rx_mod_stop(struct bna_rx_mod
*rx_mod
, enum bna_rx_type type
)
2026 struct list_head
*qe
;
2028 rx_mod
->flags
&= ~BNA_RX_MOD_F_ENET_STARTED
;
2029 rx_mod
->flags
&= ~BNA_RX_MOD_F_ENET_LOOPBACK
;
2031 rx_mod
->stop_cbfn
= bna_enet_cb_rx_stopped
;
2033 bfa_wc_init(&rx_mod
->rx_stop_wc
, bna_rx_mod_cb_rx_stopped_all
, rx_mod
);
2035 list_for_each(qe
, &rx_mod
->rx_active_q
) {
2036 rx
= (struct bna_rx
*)qe
;
2037 if (rx
->type
== type
) {
2038 bfa_wc_up(&rx_mod
->rx_stop_wc
);
2043 bfa_wc_wait(&rx_mod
->rx_stop_wc
);
2047 bna_rx_mod_fail(struct bna_rx_mod
*rx_mod
)
2050 struct list_head
*qe
;
2052 rx_mod
->flags
&= ~BNA_RX_MOD_F_ENET_STARTED
;
2053 rx_mod
->flags
&= ~BNA_RX_MOD_F_ENET_LOOPBACK
;
2055 list_for_each(qe
, &rx_mod
->rx_active_q
) {
2056 rx
= (struct bna_rx
*)qe
;
2061 void bna_rx_mod_init(struct bna_rx_mod
*rx_mod
, struct bna
*bna
,
2062 struct bna_res_info
*res_info
)
2065 struct bna_rx
*rx_ptr
;
2066 struct bna_rxp
*rxp_ptr
;
2067 struct bna_rxq
*rxq_ptr
;
2072 rx_mod
->rx
= (struct bna_rx
*)
2073 res_info
[BNA_MOD_RES_MEM_T_RX_ARRAY
].res_u
.mem_info
.mdl
[0].kva
;
2074 rx_mod
->rxp
= (struct bna_rxp
*)
2075 res_info
[BNA_MOD_RES_MEM_T_RXP_ARRAY
].res_u
.mem_info
.mdl
[0].kva
;
2076 rx_mod
->rxq
= (struct bna_rxq
*)
2077 res_info
[BNA_MOD_RES_MEM_T_RXQ_ARRAY
].res_u
.mem_info
.mdl
[0].kva
;
2079 /* Initialize the queues */
2080 INIT_LIST_HEAD(&rx_mod
->rx_free_q
);
2081 rx_mod
->rx_free_count
= 0;
2082 INIT_LIST_HEAD(&rx_mod
->rxq_free_q
);
2083 rx_mod
->rxq_free_count
= 0;
2084 INIT_LIST_HEAD(&rx_mod
->rxp_free_q
);
2085 rx_mod
->rxp_free_count
= 0;
2086 INIT_LIST_HEAD(&rx_mod
->rx_active_q
);
2088 /* Build RX queues */
2089 for (index
= 0; index
< bna
->ioceth
.attr
.num_rxp
; index
++) {
2090 rx_ptr
= &rx_mod
->rx
[index
];
2092 bfa_q_qe_init(&rx_ptr
->qe
);
2093 INIT_LIST_HEAD(&rx_ptr
->rxp_q
);
2095 rx_ptr
->rid
= index
;
2096 rx_ptr
->stop_cbfn
= NULL
;
2097 rx_ptr
->stop_cbarg
= NULL
;
2099 list_add_tail(&rx_ptr
->qe
, &rx_mod
->rx_free_q
);
2100 rx_mod
->rx_free_count
++;
2103 /* build RX-path queue */
2104 for (index
= 0; index
< bna
->ioceth
.attr
.num_rxp
; index
++) {
2105 rxp_ptr
= &rx_mod
->rxp
[index
];
2106 bfa_q_qe_init(&rxp_ptr
->qe
);
2107 list_add_tail(&rxp_ptr
->qe
, &rx_mod
->rxp_free_q
);
2108 rx_mod
->rxp_free_count
++;
2111 /* build RXQ queue */
2112 for (index
= 0; index
< (bna
->ioceth
.attr
.num_rxp
* 2); index
++) {
2113 rxq_ptr
= &rx_mod
->rxq
[index
];
2114 bfa_q_qe_init(&rxq_ptr
->qe
);
2115 list_add_tail(&rxq_ptr
->qe
, &rx_mod
->rxq_free_q
);
2116 rx_mod
->rxq_free_count
++;
2121 bna_rx_mod_uninit(struct bna_rx_mod
*rx_mod
)
2123 struct list_head
*qe
;
2127 list_for_each(qe
, &rx_mod
->rx_free_q
)
2131 list_for_each(qe
, &rx_mod
->rxp_free_q
)
2135 list_for_each(qe
, &rx_mod
->rxq_free_q
)
2142 bna_bfi_rx_enet_start_rsp(struct bna_rx
*rx
, struct bfi_msgq_mhdr
*msghdr
)
2144 struct bfi_enet_rx_cfg_rsp
*cfg_rsp
= &rx
->bfi_enet_cmd
.cfg_rsp
;
2145 struct bna_rxp
*rxp
= NULL
;
2146 struct bna_rxq
*q0
= NULL
, *q1
= NULL
;
2147 struct list_head
*rxp_qe
;
2150 bfa_msgq_rsp_copy(&rx
->bna
->msgq
, (u8
*)cfg_rsp
,
2151 sizeof(struct bfi_enet_rx_cfg_rsp
));
2153 rx
->hw_id
= cfg_rsp
->hw_id
;
2155 for (i
= 0, rxp_qe
= bfa_q_first(&rx
->rxp_q
);
2157 i
++, rxp_qe
= bfa_q_next(rxp_qe
)) {
2158 rxp
= (struct bna_rxp
*)rxp_qe
;
2159 GET_RXQS(rxp
, q0
, q1
);
2161 /* Setup doorbells */
2162 rxp
->cq
.ccb
->i_dbell
->doorbell_addr
=
2163 rx
->bna
->pcidev
.pci_bar_kva
2164 + ntohl(cfg_rsp
->q_handles
[i
].i_dbell
);
2165 rxp
->hw_id
= cfg_rsp
->q_handles
[i
].hw_cqid
;
2167 rx
->bna
->pcidev
.pci_bar_kva
2168 + ntohl(cfg_rsp
->q_handles
[i
].ql_dbell
);
2169 q0
->hw_id
= cfg_rsp
->q_handles
[i
].hw_lqid
;
2172 rx
->bna
->pcidev
.pci_bar_kva
2173 + ntohl(cfg_rsp
->q_handles
[i
].qs_dbell
);
2174 q1
->hw_id
= cfg_rsp
->q_handles
[i
].hw_sqid
;
2177 /* Initialize producer/consumer indexes */
2178 (*rxp
->cq
.ccb
->hw_producer_index
) = 0;
2179 rxp
->cq
.ccb
->producer_index
= 0;
2180 q0
->rcb
->producer_index
= q0
->rcb
->consumer_index
= 0;
2182 q1
->rcb
->producer_index
= q1
->rcb
->consumer_index
= 0;
2185 bfa_fsm_send_event(rx
, RX_E_STARTED
);
2189 bna_bfi_rx_enet_stop_rsp(struct bna_rx
*rx
, struct bfi_msgq_mhdr
*msghdr
)
2191 bfa_fsm_send_event(rx
, RX_E_STOPPED
);
2195 bna_rx_res_req(struct bna_rx_config
*q_cfg
, struct bna_res_info
*res_info
)
2197 u32 cq_size
, hq_size
, dq_size
;
2198 u32 cpage_count
, hpage_count
, dpage_count
;
2199 struct bna_mem_info
*mem_info
;
2204 dq_depth
= q_cfg
->q_depth
;
2205 hq_depth
= ((q_cfg
->rxp_type
== BNA_RXP_SINGLE
) ? 0 : q_cfg
->q_depth
);
2206 cq_depth
= dq_depth
+ hq_depth
;
2208 BNA_TO_POWER_OF_2_HIGH(cq_depth
);
2209 cq_size
= cq_depth
* BFI_CQ_WI_SIZE
;
2210 cq_size
= ALIGN(cq_size
, PAGE_SIZE
);
2211 cpage_count
= SIZE_TO_PAGES(cq_size
);
2213 BNA_TO_POWER_OF_2_HIGH(dq_depth
);
2214 dq_size
= dq_depth
* BFI_RXQ_WI_SIZE
;
2215 dq_size
= ALIGN(dq_size
, PAGE_SIZE
);
2216 dpage_count
= SIZE_TO_PAGES(dq_size
);
2218 if (BNA_RXP_SINGLE
!= q_cfg
->rxp_type
) {
2219 BNA_TO_POWER_OF_2_HIGH(hq_depth
);
2220 hq_size
= hq_depth
* BFI_RXQ_WI_SIZE
;
2221 hq_size
= ALIGN(hq_size
, PAGE_SIZE
);
2222 hpage_count
= SIZE_TO_PAGES(hq_size
);
2226 res_info
[BNA_RX_RES_MEM_T_CCB
].res_type
= BNA_RES_T_MEM
;
2227 mem_info
= &res_info
[BNA_RX_RES_MEM_T_CCB
].res_u
.mem_info
;
2228 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2229 mem_info
->len
= sizeof(struct bna_ccb
);
2230 mem_info
->num
= q_cfg
->num_paths
;
2232 res_info
[BNA_RX_RES_MEM_T_RCB
].res_type
= BNA_RES_T_MEM
;
2233 mem_info
= &res_info
[BNA_RX_RES_MEM_T_RCB
].res_u
.mem_info
;
2234 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2235 mem_info
->len
= sizeof(struct bna_rcb
);
2236 mem_info
->num
= BNA_GET_RXQS(q_cfg
);
2238 res_info
[BNA_RX_RES_MEM_T_CQPT
].res_type
= BNA_RES_T_MEM
;
2239 mem_info
= &res_info
[BNA_RX_RES_MEM_T_CQPT
].res_u
.mem_info
;
2240 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2241 mem_info
->len
= cpage_count
* sizeof(struct bna_dma_addr
);
2242 mem_info
->num
= q_cfg
->num_paths
;
2244 res_info
[BNA_RX_RES_MEM_T_CSWQPT
].res_type
= BNA_RES_T_MEM
;
2245 mem_info
= &res_info
[BNA_RX_RES_MEM_T_CSWQPT
].res_u
.mem_info
;
2246 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2247 mem_info
->len
= cpage_count
* sizeof(void *);
2248 mem_info
->num
= q_cfg
->num_paths
;
2250 res_info
[BNA_RX_RES_MEM_T_CQPT_PAGE
].res_type
= BNA_RES_T_MEM
;
2251 mem_info
= &res_info
[BNA_RX_RES_MEM_T_CQPT_PAGE
].res_u
.mem_info
;
2252 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2253 mem_info
->len
= PAGE_SIZE
;
2254 mem_info
->num
= cpage_count
* q_cfg
->num_paths
;
2256 res_info
[BNA_RX_RES_MEM_T_DQPT
].res_type
= BNA_RES_T_MEM
;
2257 mem_info
= &res_info
[BNA_RX_RES_MEM_T_DQPT
].res_u
.mem_info
;
2258 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2259 mem_info
->len
= dpage_count
* sizeof(struct bna_dma_addr
);
2260 mem_info
->num
= q_cfg
->num_paths
;
2262 res_info
[BNA_RX_RES_MEM_T_DSWQPT
].res_type
= BNA_RES_T_MEM
;
2263 mem_info
= &res_info
[BNA_RX_RES_MEM_T_DSWQPT
].res_u
.mem_info
;
2264 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2265 mem_info
->len
= dpage_count
* sizeof(void *);
2266 mem_info
->num
= q_cfg
->num_paths
;
2268 res_info
[BNA_RX_RES_MEM_T_DPAGE
].res_type
= BNA_RES_T_MEM
;
2269 mem_info
= &res_info
[BNA_RX_RES_MEM_T_DPAGE
].res_u
.mem_info
;
2270 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2271 mem_info
->len
= PAGE_SIZE
;
2272 mem_info
->num
= dpage_count
* q_cfg
->num_paths
;
2274 res_info
[BNA_RX_RES_MEM_T_HQPT
].res_type
= BNA_RES_T_MEM
;
2275 mem_info
= &res_info
[BNA_RX_RES_MEM_T_HQPT
].res_u
.mem_info
;
2276 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2277 mem_info
->len
= hpage_count
* sizeof(struct bna_dma_addr
);
2278 mem_info
->num
= (hpage_count
? q_cfg
->num_paths
: 0);
2280 res_info
[BNA_RX_RES_MEM_T_HSWQPT
].res_type
= BNA_RES_T_MEM
;
2281 mem_info
= &res_info
[BNA_RX_RES_MEM_T_HSWQPT
].res_u
.mem_info
;
2282 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2283 mem_info
->len
= hpage_count
* sizeof(void *);
2284 mem_info
->num
= (hpage_count
? q_cfg
->num_paths
: 0);
2286 res_info
[BNA_RX_RES_MEM_T_HPAGE
].res_type
= BNA_RES_T_MEM
;
2287 mem_info
= &res_info
[BNA_RX_RES_MEM_T_HPAGE
].res_u
.mem_info
;
2288 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2289 mem_info
->len
= (hpage_count
? PAGE_SIZE
: 0);
2290 mem_info
->num
= (hpage_count
? (hpage_count
* q_cfg
->num_paths
) : 0);
2292 res_info
[BNA_RX_RES_MEM_T_IBIDX
].res_type
= BNA_RES_T_MEM
;
2293 mem_info
= &res_info
[BNA_RX_RES_MEM_T_IBIDX
].res_u
.mem_info
;
2294 mem_info
->mem_type
= BNA_MEM_T_DMA
;
2295 mem_info
->len
= BFI_IBIDX_SIZE
;
2296 mem_info
->num
= q_cfg
->num_paths
;
2298 res_info
[BNA_RX_RES_MEM_T_RIT
].res_type
= BNA_RES_T_MEM
;
2299 mem_info
= &res_info
[BNA_RX_RES_MEM_T_RIT
].res_u
.mem_info
;
2300 mem_info
->mem_type
= BNA_MEM_T_KVA
;
2301 mem_info
->len
= BFI_ENET_RSS_RIT_MAX
;
2304 res_info
[BNA_RX_RES_T_INTR
].res_type
= BNA_RES_T_INTR
;
2305 res_info
[BNA_RX_RES_T_INTR
].res_u
.intr_info
.intr_type
= BNA_INTR_T_MSIX
;
2306 res_info
[BNA_RX_RES_T_INTR
].res_u
.intr_info
.num
= q_cfg
->num_paths
;
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		const struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	u32 page_count;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *unmapq_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;
	struct bna_mem_descr *dqpt_mem;
	struct bna_mem_descr *hsqpt_mem;
	struct bna_mem_descr *dsqpt_mem;
	struct bna_mem_descr *hpage_mem;
	struct bna_mem_descr *dpage_mem;
	int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
	int dpage_count, hpage_count, rcb_idx;

	if (!bna_rx_res_check(rx_mod, rx_cfg))
		return NULL;

	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
			rx_cfg->num_paths;
	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;
	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);

	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}

	rx->num_paths = rx_cfg->num_paths;
	for (i = 0, rcb_idx = 0; i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = (1 << rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;

		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */
		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
		rcb_idx++;
		q0->rcb->q_depth = rx_cfg->q_depth;
		q0->rcb->bnad = bna->bnad;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
		q0->rcb->page_idx = dpage_idx;
		q0->rcb->page_count = dpage_count;
		dpage_idx += dpage_count;

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */
		if (q1) {
			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)unmapq_mem[rcb_idx].kva;
			rcb_idx++;
			q1->rcb->q_depth = rx_cfg->q_depth;
			q1->rcb->bnad = bna->bnad;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->small_buff_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[hpage_idx]);
			q1->rcb->page_idx = hpage_idx;
			q1->rcb->page_count = hpage_count;
			hpage_idx += hpage_count;

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup CQ */
		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		rxp->cq.ccb->q_depth = rx_cfg->q_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q_depth);
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
		rxp->cq.ccb->page_idx = cpage_idx;
		rxp->cq.ccb->page_count = page_count;
		cpage_idx += page_count;

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= (1 << rx->rid);

	return rx;
}
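/*
 * bna_rx_destroy() - tear down an Rx: uninit its RxF, hand every RCB/CCB
 * back to bnad via the destroy callbacks, return the RxQs and RxPs to the
 * Rx module free lists, unlink the Rx from the active list and clear its
 * rid from rid_mask.
 */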
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			bna_rxq_put(rx_mod, q1);
		}
		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~(1 << rx->rid);

	bna_rx_put(rx_mod, rx);
}
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat it as already stopped */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLED;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
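/*
 * bna_rx_mode_set() - apply promiscuous/all-multicast mode changes to the
 * RxF. The request fails if promiscuous or default mode is already owned
 * by another Rx in the system; otherwise the pending filter state is
 * updated and RXF_E_CONFIG is raised only when the hardware actually needs
 * reprogramming, with @cbfn invoked either from the RxF or immediately.
 */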
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */
	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid)) {
			goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */
	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */
	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}
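/*
 * bna_rx_dim_update() - dynamic interrupt moderation. Classify the packet
 * rate seen on a CCB since the last poll into one of the BNA_LOAD_T_*
 * buckets, bias the result toward small or large packets, then look up and
 * program the corresponding coalescing timeout from the DIM vector table.
 */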
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = BNA_BIAS_T_SMALL;
	else
		bias = BNA_BIAS_T_LARGE;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}
const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
#define call_tx_stop_cbfn(tx)						\
do {									\
	if ((tx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_tx *);			\
		void *cbarg;						\
		cbfn = (tx)->stop_cbfn;					\
		cbarg = (tx)->stop_cbarg;				\
		(tx)->stop_cbfn = NULL;					\
		(tx)->stop_cbarg = NULL;				\
		cbfn(cbarg, (tx));					\
	}								\
} while (0)

#define call_tx_prio_change_cbfn(tx)					\
do {									\
	if ((tx)->prio_change_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_tx *);		\
		cbfn = (tx)->prio_change_cbfn;				\
		(tx)->prio_change_cbfn = NULL;				\
		cbfn((tx)->bna->bnad, (tx));				\
	}								\
} while (0)
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

	TX_E_PRIO_CHANGE	= 6,
	TX_E_CLEANUP_DONE	= 7,

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);
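/*
 * Tx state machine: a Tx moves from stopped through start_wait/started and
 * back through stop_wait/cleanup_wait on the TX_E_* events. Priority or
 * bandwidth updates park the Tx in prio_stop_wait/prio_cleanup_wait before
 * restarting it, and failures route through failed/quiesce_wait.
 */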
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_STARTED:
		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
				BNA_TX_F_BW_UPDATED);
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		tx->flags |= BNA_TX_F_PRIO_CHANGED;
		break;

	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;

	default:
		bfa_sm_fault(event);
	}
}

bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STARTED:
		/*
		 * We are here due to start_wait -> stop_wait transition on
		 * TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		call_tx_prio_change_cbfn(tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	call_tx_prio_change_cbfn(tx);
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
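/*
 * bna_bfi_tx_enet_start() - build a BFI_ENET_H2I_TX_CFG_SET_REQ message
 * describing every TxQ (queue page table, priority, IB index address and
 * interrupt vector) plus the interrupt-block and VLAN configuration, and
 * post it to the firmware message queue.
 */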
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq;
		i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = page_mem[i].kva;

		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR) {
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	} else {
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	}
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);

	return tx;
}
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
		i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
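/*
 * bna_tx_res_req() - describe the memory and interrupt resources a Tx with
 * @num_txq queues of @txq_depth entries needs: TCBs, DMA queue page tables
 * and their shadow KVA tables, the queue pages themselves, IB index
 * segments, and one MSI-X completion vector per TxQ.
 */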
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = num_txq * page_count;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
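/*
 * bna_tx_create() - allocate a Tx and its TxQs from the Tx module free
 * lists, wire up the bnad callbacks, and distribute the TCB, IB index and
 * queue page table resources in @res_info across the TxQs before leaving
 * the Tx in the stopped state with its rid marked in rid_mask.
 */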
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count, page_size;
	int page_idx = 0;
	int i = 0;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
			tx_cfg->num_txq;
	page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;

	/* Get resources */
	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */
	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);

	/* TxQs */
	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
	}

	/* Initialize Tx */
	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ */
	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = 0; /* Not used */
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */
		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, page_size,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				res_u.mem_info.mdl[page_idx]);
		txq->tcb->page_idx = page_idx;
		txq->tcb->page_count = page_count;
		page_idx += page_count;

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= (1 << tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
}
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_free_q)
		;
	list_for_each(qe, &tx_mod->txq_free_q)
		;
}
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}
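/*
 * bna_tx_mod_stop() - stop every active Tx of the given type. A wait
 * counter tracks the outstanding stops; bna_tx_mod_cb_tx_stopped() counts
 * each one down and bna_tx_mod_cb_tx_stopped_all() notifies the enet layer
 * once the last Tx has stopped.
 */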
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
	}
}