/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
 */
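/* The interrupt block (IB) doorbell-ack word pre-encodes the coalescing
 * timeout, so acking the IB from the datapath re-arms it with the same
 * coalescing setting.
 */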
static void
bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
{
	ib->coalescing_timeo = coalescing_timeo;
	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->coalescing_timeo, 0);
}
#define bna_rxf_vlan_cfg_soft_reset(rxf)				\
do {									\
	(rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;		\
	(rxf)->vlan_strip_pending = true;				\
} while (0)

#define bna_rxf_rss_cfg_soft_reset(rxf)					\
do {									\
	if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)			\
		(rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |		\
				BNA_RSS_F_CFG_PENDING |			\
				BNA_RSS_F_STATUS_PENDING);		\
} while (0)
static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
				   enum bna_cleanup_type cleanup);
static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
				     enum bna_cleanup_type cleanup);
static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
				      enum bna_cleanup_type cleanup);
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
			enum bna_rxf_event);
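/* RXF (Rx filter) state machine: configuration changes are funneled through
 * cfg_wait, which issues one f/w request at a time via bna_rxf_cfg_apply()
 * and returns to started once nothing is pending; fltr_clr_wait and
 * last_resp_wait drain outstanding CAM/filter requests on pause and stop.
 */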
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
	call_rxf_stop_cbfn(rxf);
}

static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_START:
		if (rxf->flags & BNA_RXF_F_PAUSED) {
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
			call_rxf_start_cbfn(rxf);
		} else
			bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_STOP:
		call_rxf_stop_cbfn(rxf);
		break;

	case RXF_E_FAIL:
		/* No-op */
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_pause_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		call_rxf_resume_cbfn(rxf);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
{
	call_rxf_pause_cbfn(rxf);
}

static void
bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		call_rxf_cam_fltr_cbfn(rxf);
		break;

	case RXF_E_RESUME:
		rxf->flags &= ~BNA_RXF_F_PAUSED;
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
{
	if (!bna_rxf_cfg_apply(rxf)) {
		/* No more pending config updates */
		bfa_fsm_set_state(rxf, bna_rxf_sm_started);
	}
}
static void
bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
		bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
		break;

	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_start_cbfn(rxf);
		call_rxf_cam_fltr_cbfn(rxf);
		call_rxf_resume_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		call_rxf_start_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_cfg_apply(rxf)) {
			/* No more pending config updates */
			bfa_fsm_set_state(rxf, bna_rxf_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
	call_rxf_start_cbfn(rxf);
	call_rxf_cam_fltr_cbfn(rxf);
	call_rxf_resume_cbfn(rxf);
}

static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_STOP:
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_CONFIG:
		bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
		break;

	case RXF_E_PAUSE:
		rxf->flags |= BNA_RXF_F_PAUSED;
		if (!bna_rxf_fltr_clear(rxf))
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		else
			bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
		bna_rxf_cfg_reset(rxf);
		call_rxf_pause_cbfn(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	case RXF_E_FW_RESP:
		if (!bna_rxf_fltr_clear(rxf)) {
			/* No more pending CAM entries to clear */
			bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
{
}

static void
bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
	switch (event) {
	case RXF_E_FAIL:
	case RXF_E_FW_RESP:
		bna_rxf_cfg_reset(rxf);
		bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bfi_enet_h2i_msgs req_type)
{
	struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_ucast_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
	memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_add_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
{
	struct bfi_enet_mcast_del_req *req =
		&rxf->bfi_enet_cmd.mcast_del_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
		0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
	req->handle = htons(handle);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_mcast_del_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
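/* VLAN filter updates are pushed to the f/w one BFI_ENET_VLAN_BLOCK_SIZE
 * block of the BFI_ENET_VLAN_ID_MAX-entry table at a time;
 * rxf->vlan_pending_bitmask keeps one dirty bit per block.
 */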
static void
bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
{
	struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
	int i;
	int j;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
	req->block_idx = block_idx;
	for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
		j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
		if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
			req->bit_mask[i] =
				htonl(rxf->vlan_filter_table[j]);
		else
			req->bit_mask[i] = 0xFFFFFFFF;
	}
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->vlan_strip_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rit_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
	req->size = htons(rxf->rit_size);
	memcpy(&req->table[0], rxf->rit, rxf->rit_size);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rit_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
static void
bna_bfi_rss_cfg(struct bna_rxf *rxf)
{
	struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
	int i;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
	req->cfg.type = rxf->rss_cfg.hash_type;
	req->cfg.mask = rxf->rss_cfg.hash_mask;
	for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
		req->cfg.key[i] =
			htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}

static void
bna_bfi_rss_enable(struct bna_rxf *rxf)
{
	struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	req->enable = rxf->rss_status;
	bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &req->mh);
	bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
}
/* This function gets the multicast MAC that has already been added to CAM */
static struct bna_mac *
bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
{
	struct bna_mac *mac;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_active_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	list_for_each(qe, &rxf->mcast_pending_del_q) {
		mac = (struct bna_mac *)qe;
		if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
			return mac;
	}

	return NULL;
}

static struct bna_mcam_handle *
bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
{
	struct bna_mcam_handle *mchandle;
	struct list_head *qe;

	list_for_each(qe, &rxf->mcast_handle_q) {
		mchandle = (struct bna_mcam_handle *)qe;
		if (mchandle->handle == handle)
			return mchandle;
	}

	return NULL;
}
static void
bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
{
	struct bna_mac *mcmac;
	struct bna_mcam_handle *mchandle;

	mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
	mchandle = bna_rxf_mchandle_get(rxf, handle);
	if (mchandle == NULL) {
		mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
		mchandle->handle = handle;
		mchandle->refcnt = 0;
		list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
	}
	mchandle->refcnt++;
	mcmac->handle = mchandle;
}
static int
bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
		enum bna_cleanup_type cleanup)
{
	struct bna_mcam_handle *mchandle;
	int ret = 0;

	mchandle = mac->handle;
	if (mchandle == NULL)
		return ret;

	mchandle->refcnt--;
	if (mchandle->refcnt == 0) {
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_del_req(rxf, mchandle->handle);
			ret = 1;
		}
		list_del(&mchandle->qe);
		bfa_q_qe_init(&mchandle->qe);
		bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
	}
	mac->handle = NULL;

	return ret;
}
static int
bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;
	int ret;

	/* First delete multicast entries to maintain the count */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Add multicast entries */
	if (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->mcast_active_q);
		bna_bfi_mcast_add_req(rxf, mac);
		return 1;
	}

	return 0;
}
static int
bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
{
	u8 vlan_pending_bitmask;
	int block_idx = 0;

	if (rxf->vlan_pending_bitmask) {
		vlan_pending_bitmask = rxf->vlan_pending_bitmask;
		while (!(vlan_pending_bitmask & 0x1)) {
			block_idx++;
			vlan_pending_bitmask >>= 1;
		}
		rxf->vlan_pending_bitmask &= ~(1 << block_idx);
		bna_bfi_rx_vlan_filter_set(rxf, block_idx);
		return 1;
	}

	return 0;
}
static int
bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;
	int ret;

	/* Throw away delete pending mcast entries */
	while (!list_empty(&rxf->mcast_pending_del_q)) {
		bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
		mac = (struct bna_mac *)qe;
		ret = bna_rxf_mcast_del(rxf, mac, cleanup);
		bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
		if (ret)
			return ret;
	}

	/* Move active mcast entries to pending_add_q */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		list_add_tail(qe, &rxf->mcast_pending_add_q);
		mac = (struct bna_mac *)qe;
		if (bna_rxf_mcast_del(rxf, mac, cleanup))
			return 1;
	}

	return 0;
}
static int
bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->rss_pending) {
		if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
			bna_bfi_rit_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
			bna_bfi_rss_cfg(rxf);
			return 1;
		}

		if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
			rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
			bna_bfi_rss_enable(rxf);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_cfg_apply(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_mcast_cfg_apply(rxf))
		return 1;

	if (bna_rxf_promisc_cfg_apply(rxf))
		return 1;

	if (bna_rxf_allmulti_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_cfg_apply(rxf))
		return 1;

	if (bna_rxf_vlan_strip_cfg_apply(rxf))
		return 1;

	if (bna_rxf_rss_cfg_apply(rxf))
		return 1;

	return 0;
}

/* Only software reset */
static int
bna_rxf_fltr_clear(struct bna_rxf *rxf)
{
	if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
		return 1;

	return 0;
}
static void
bna_rxf_cfg_reset(struct bna_rxf *rxf)
{
	bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
	bna_rxf_vlan_cfg_soft_reset(rxf);
	bna_rxf_rss_cfg_soft_reset(rxf);
}
static void
bna_rit_init(struct bna_rxf *rxf, int rit_size)
{
	struct bna_rx *rx = rxf->rx;
	struct bna_rxp *rxp;
	struct list_head *qe;
	int offset = 0;

	rxf->rit_size = rit_size;
	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxf->rit[offset] = rxp->cq.ccb->id;
		offset++;
	}
}
void
bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rsp *rsp =
		(struct bfi_enet_rsp *)msghdr;

	if (rsp->error) {
		/* Clear ucast from cache */
		rxf->ucast_active_set = 0;
	}

	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}

void
bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_mcast_add_req *req =
		&rxf->bfi_enet_cmd.mcast_add_req;
	struct bfi_enet_mcast_add_rsp *rsp =
		(struct bfi_enet_mcast_add_rsp *)msghdr;

	bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
		ntohs(rsp->handle));
	bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
}
static void
bna_rxf_init(struct bna_rxf *rxf,
		struct bna_rx *rx,
		struct bna_rx_config *q_config,
		struct bna_res_info *res_info)
{
	rxf->rx = rx;

	INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
	INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;
	INIT_LIST_HEAD(&rxf->ucast_active_q);
	rxf->ucast_pending_mac = NULL;

	INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
	INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
	INIT_LIST_HEAD(&rxf->mcast_active_q);
	INIT_LIST_HEAD(&rxf->mcast_handle_q);

	if (q_config->paused)
		rxf->flags |= BNA_RXF_F_PAUSED;

	rxf->rit = (u8 *)
		res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
	bna_rit_init(rxf, q_config->num_paths);

	rxf->rss_status = q_config->rss_status;
	if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
		rxf->rss_cfg = q_config->rss_config;
		rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
		rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
		rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
	}

	rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
	memset(rxf->vlan_filter_table, 0,
			(sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
	rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
	rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;

	rxf->vlan_strip_status = q_config->vlan_strip_status;

	bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
	struct bna_mac *mac;

	rxf->ucast_pending_set = 0;
	rxf->ucast_active_set = 0;

	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
	}

	if (rxf->ucast_pending_mac) {
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
		bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
				    rxf->ucast_pending_mac);
		rxf->ucast_pending_mac = NULL;
	}

	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	rxf->rxmode_pending = 0;
	rxf->rxmode_pending_bitmask = 0;
	if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
		rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
	if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
		rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;

	rxf->rss_pending = 0;
	rxf->vlan_strip_pending = false;
}
static void
bna_rx_cb_rxf_started(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
}

static void
bna_rxf_start(struct bna_rxf *rxf)
{
	rxf->start_cbfn = bna_rx_cb_rxf_started;
	rxf->start_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_START);
}

static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
}

static void
bna_rxf_stop(struct bna_rxf *rxf)
{
	rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
	rxf->stop_cbarg = rxf->rx;
	bfa_fsm_send_event(rxf, RXF_E_STOP);
}

static void
bna_rxf_fail(struct bna_rxf *rxf)
{
	bfa_fsm_send_event(rxf, RXF_E_FAIL);
}
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->ucast_pending_mac == NULL) {
		rxf->ucast_pending_mac =
			bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
		if (rxf->ucast_pending_mac == NULL)
			return BNA_CB_UCAST_CAM_FULL;
		bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
	}

	memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
	rxf->ucast_pending_set = 1;
	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
		 void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct bna_mac *mac;

	/* Check if already added or pending addition */
	if (bna_mac_find(&rxf->mcast_active_q, addr) ||
		bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
		if (cbfn)
			cbfn(rx->bna->bnad, rx);
		return BNA_CB_SUCCESS;
	}

	mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
	if (mac == NULL)
		return BNA_CB_MCAST_LIST_FULL;
	bfa_q_qe_init(&mac->qe);
	memcpy(mac->addr, addr, ETH_ALEN);
	list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;

	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = uclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
	}

	return BNA_CB_UCAST_CAM_FULL;
}
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
		     void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head list_head;
	struct list_head *qe;
	u8 *mcaddr;
	struct bna_mac *mac, *del_mac;
	int i;

	/* Purge the pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Schedule active_q entries for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);

		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	/* Allocate nodes */
	INIT_LIST_HEAD(&list_head);
	for (i = 0, mcaddr = mclist; i < count; i++) {
		mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
		if (mac == NULL)
			goto err_return;
		bfa_q_qe_init(&mac->qe);
		memcpy(mac->addr, mcaddr, ETH_ALEN);
		list_add_tail(&mac->qe, &list_head);
		mcaddr += ETH_ALEN;
	}

	/* Add the new entries */
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
	}

	rxf->cam_fltr_cbfn = cbfn;
	rxf->cam_fltr_cbarg = rx->bna->bnad;
	bfa_fsm_send_event(rxf, RXF_E_CONFIG);

	return BNA_CB_SUCCESS;

err_return:
	while (!list_empty(&list_head)) {
		bfa_q_deq(&list_head, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
	}

	return BNA_CB_MCAST_LIST_FULL;
}
void
bna_rx_mcast_delall(struct bna_rx *rx,
		    void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	struct list_head *qe;
	struct bna_mac *mac, *del_mac;
	int need_hw_config = 0;

	/* Purge all entries from pending_add_q */
	while (!list_empty(&rxf->mcast_pending_add_q)) {
		bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
	}

	/* Schedule all entries in active_q for deletion */
	while (!list_empty(&rxf->mcast_active_q)) {
		bfa_q_deq(&rxf->mcast_active_q, &qe);
		mac = (struct bna_mac *)qe;
		bfa_q_qe_init(&mac->qe);

		del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));

		memcpy(del_mac, mac, sizeof(*del_mac));
		list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
		mac->handle = NULL;
		bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
		need_hw_config = 1;
	}

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
		return;
	}

	if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);
}
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
	int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
	int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->vlan_pending_bitmask |= (1 << group_id);
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
static int
bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
{
	struct bna_mac *mac = NULL;
	struct list_head *qe;

	/* Delete MAC addresses previously added */
	if (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		mac = (struct bna_mac *)qe;
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
		bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
		return 1;
	}

	/* Set default unicast MAC */
	if (rxf->ucast_pending_set) {
		rxf->ucast_pending_set = 0;
		memcpy(rxf->ucast_active_mac.addr,
			rxf->ucast_pending_mac->addr, ETH_ALEN);
		rxf->ucast_active_set = 1;
		bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
			BFI_ENET_H2I_MAC_UCAST_SET_REQ);
		return 1;
	}

	/* Add additional MAC entries */
	if (!list_empty(&rxf->ucast_pending_add_q)) {
		bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
		mac = (struct bna_mac *)qe;
		list_add_tail(&mac->qe, &rxf->ucast_active_q);
		bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
		return 1;
	}

	return 0;
}
static int
bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct list_head *qe;
	struct bna_mac *mac;

	/* Throw away delete pending ucast entries */
	while (!list_empty(&rxf->ucast_pending_del_q)) {
		bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
		mac = (struct bna_mac *)qe;
		if (cleanup == BNA_SOFT_CLEANUP)
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
					    mac);
		else {
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
					    mac);
			return 1;
		}
	}

	/* Move active ucast entries to pending_add_q */
	while (!list_empty(&rxf->ucast_active_q)) {
		bfa_q_deq(&rxf->ucast_active_q, &qe);
		list_add_tail(qe, &rxf->ucast_pending_add_q);
		if (cleanup == BNA_HARD_CLEANUP) {
			mac = (struct bna_mac *)qe;
			bna_bfi_ucast_req(rxf, mac,
				BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
			return 1;
		}
	}

	if (rxf->ucast_active_set) {
		rxf->ucast_pending_set = 1;
		rxf->ucast_active_set = 0;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
				BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;

	/* Enable/disable promiscuous mode */
	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_PROMISC;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	} else if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		/* move promisc configuration from pending -> active */
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	}

	return 0;
}
static int
bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	struct bna *bna = rxf->rx->bna;

	/* Clear pending promisc mode disable */
	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		promisc_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		bna->promisc_rid = BFI_INVALID_RID;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	/* Move promisc mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
{
	/* Enable/disable allmulti mode */
	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
		return 1;
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* move allmulti configuration from pending -> active */
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
		return 1;
	}

	return 0;
}

static int
bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
{
	/* Clear pending allmulti mode disable */
	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask)) {
		allmulti_inactive(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	/* Move allmulti mode config from active -> pending */
	if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		allmulti_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask);
		rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
		if (cleanup == BNA_HARD_CLEANUP) {
			bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
			return 1;
		}
	}

	return 0;
}
static int
bna_rxf_promisc_enable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_enable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_promisc_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		promisc_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		bna->promisc_rid = rxf->rx->rid;
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_promisc_disable(struct bna_rxf *rxf)
{
	struct bna *bna = rxf->rx->bna;
	int ret = 0;

	if (is_promisc_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_promisc_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		promisc_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		bna->promisc_rid = BFI_INVALID_RID;
	} else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
		/* Schedule disable */
		promisc_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_allmulti_enable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask) ||
		(rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
		/* Do nothing if pending enable or already enabled */
	} else if (is_allmulti_disable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending disable command */
		allmulti_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else {
		/* Schedule enable */
		allmulti_enable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}

static int
bna_rxf_allmulti_disable(struct bna_rxf *rxf)
{
	int ret = 0;

	if (is_allmulti_disable(rxf->rxmode_pending,
				rxf->rxmode_pending_bitmask) ||
		(!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
		/* Do nothing if pending disable or already disabled */
	} else if (is_allmulti_enable(rxf->rxmode_pending,
					rxf->rxmode_pending_bitmask)) {
		/* Turn off pending enable command */
		allmulti_inactive(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
	} else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
		/* Schedule disable */
		allmulti_disable(rxf->rxmode_pending,
			rxf->rxmode_pending_bitmask);
		ret = 1;
	}

	return ret;
}
static int
bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
{
	if (rxf->vlan_strip_pending) {
		rxf->vlan_strip_pending = false;
		bna_bfi_vlan_strip_enable(rxf);
		return 1;
	}

	return 0;
}
#define	BNA_GET_RXQS(qcfg)	(((qcfg)->rxp_type == BNA_RXP_SINGLE) ?	\
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define	SIZE_TO_PAGES(size)	(((size) >> PAGE_SHIFT) + ((((size) &	\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define call_rx_stop_cbfn(rx)						\
do {									\
	if ((rx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_rx *);			\
		void *cbarg;						\
		cbfn = (rx)->stop_cbfn;					\
		cbarg = (rx)->stop_cbarg;				\
		(rx)->stop_cbfn = NULL;					\
		(rx)->stop_cbarg = NULL;				\
		cbfn(cbarg, rx);					\
	}								\
} while (0)

#define call_rx_stall_cbfn(rx)						\
do {									\
	if ((rx)->rx_stall_cbfn)					\
		(rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));		\
} while (0)

#define bfi_enet_datapath_q_init(bfi_q, bna_qpt)			\
do {									\
	struct bna_dma_addr cur_q_addr =				\
		*((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));	\
	(bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;	\
	(bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;	\
	(bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;		\
	(bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;		\
	(bfi_q)->pages = htons((u16)(bna_qpt)->page_count);		\
	(bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);		\
} while (0)
static void bna_bfi_rx_enet_start(struct bna_rx *rx);
static void bna_rx_enet_stop(struct bna_rx *rx);
static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, start_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, cleanup_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, failed,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, quiesce_wait,
	struct bna_rx, enum bna_rx_event);
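/* RX state machine: start_wait/rxf_start_wait bring up the Enet Rx and its
 * filter block, started runs the IBs, and the *_stop_wait/cleanup_wait
 * states tear the path down in the reverse order; failed and quiesce_wait
 * handle IOC failure and restart.
 */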
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
	call_rx_stop_cbfn(rx);
}

static void bna_rx_sm_stopped(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	case RX_E_STOP:
		call_rx_stop_cbfn(rx);
		break;

	case RX_E_FAIL:
		/* no-op */
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
{
	bna_bfi_rx_enet_start(rx);
}

static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
	rx->rx_post_cbfn(rx->bna->bnad, rx);
	bna_rxf_start(&rx->rxf);
}

static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}
static void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_RXF_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
		call_rx_stall_cbfn(rx);
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_STOPPED:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	case RX_E_STARTED:
		bna_rx_enet_stop(rx);
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;
	int is_regular = (rx->type == BNA_RX_T_REGULAR);

	/* Start IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
	}

	bna_ethport_cb_rx_started(&rx->bna->ethport);
}

static void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_stop(&rx->rxf);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_ethport_cb_rx_stopped(&rx->bna->ethport);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
				enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		bna_rxf_fail(&rx->rxf);
		call_rx_stall_cbfn(rx);
		rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
		break;

	case RX_E_RXF_STARTED:
		bfa_fsm_set_state(rx, bna_rx_sm_started);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_FAIL:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_failed_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_START:
		bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
		break;

	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
	case RX_E_RXF_STARTED:
	case RX_E_RXF_STOPPED:
		/* No-op */
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}

static void
bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
{
}

static void
bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
{
	switch (event) {
	case RX_E_STOP:
		bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
		break;

	case RX_E_FAIL:
		bfa_fsm_set_state(rx, bna_rx_sm_failed);
		break;

	case RX_E_CLEANUP_DONE:
		bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
		break;
	}
}
static void
bna_bfi_rx_enet_start(struct bna_rx *rx)
{
	struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));

	cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
	cfg_req->num_queue_sets = rx->num_paths;
	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;

		GET_RXQS(rxp, q0, q1);
		switch (rxp->type) {
		case BNA_RXP_SLR:
		case BNA_RXP_HDS:
			/* Small RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
						&q1->qpt);
			cfg_req->q_cfg[i].qs.rx_buffer_size =
				htons((u16)q1->buffer_size);
			/* Fall through to set up the large RxQ as well */

		case BNA_RXP_SINGLE:
			/* Large/Single RxQ */
			bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
						&q0->qpt);
			if (q0->multi_buffer)
				/* multi-buffer is enabled by allocating
				 * a new rx with new set of resources.
				 * q0->buffer_size should be initialized to
				 * fragment size.
				 */
				cfg_req->rx_cfg.multi_buffer =
					BNA_STATUS_T_ENABLED;
			else
				q0->buffer_size =
					bna_enet_mtu_get(&rx->bna->enet);
			cfg_req->q_cfg[i].ql.rx_buffer_size =
				htons((u16)q0->buffer_size);
			break;
		}

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
					&rxp->cq.qpt);

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			rxp->cq.ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			rxp->cq.ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)rxp->cq.ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED :
				BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)rxp->cq.ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)rxp->cq.ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;

	switch (rxp->type) {
	case BNA_RXP_SLR:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
		break;

	case BNA_RXP_HDS:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
		cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
		cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
		cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
		break;

	case BNA_RXP_SINGLE:
		cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
		break;
	}
	cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;

	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}
static void
bna_bfi_rx_enet_stop(struct bna_rx *rx)
{
	struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
}

static void
bna_rx_enet_stop(struct bna_rx *rx)
{
	struct bna_rxp *rxp;
	struct list_head *qe_rxp;

	/* Stop IB */
	list_for_each(qe_rxp, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe_rxp;
		bna_ib_stop(rx->bna, &rxp->cq.ib);
	}

	bna_bfi_rx_enet_stop(rx);
}
static int
bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
{
	if ((rx_mod->rx_free_count == 0) ||
		(rx_mod->rxp_free_count == 0) ||
		(rx_mod->rxq_free_count == 0))
		return 0;

	if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < rx_cfg->num_paths))
			return 0;
	} else {
		if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
			(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
			return 0;
	}

	return 1;
}
static struct bna_rxq *
bna_rxq_get(struct bna_rx_mod *rx_mod)
{
	struct bna_rxq *rxq = NULL;
	struct list_head *qe = NULL;

	bfa_q_deq(&rx_mod->rxq_free_q, &qe);
	rx_mod->rxq_free_count--;
	rxq = (struct bna_rxq *)qe;
	bfa_q_qe_init(&rxq->qe);

	return rxq;
}

static void
bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
	bfa_q_qe_init(&rxq->qe);
	list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
	rx_mod->rxq_free_count++;
}

static struct bna_rxp *
bna_rxp_get(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe = NULL;
	struct bna_rxp *rxp = NULL;

	bfa_q_deq(&rx_mod->rxp_free_q, &qe);
	rx_mod->rxp_free_count--;
	rxp = (struct bna_rxp *)qe;
	bfa_q_qe_init(&rxp->qe);

	return rxp;
}

static void
bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
	bfa_q_qe_init(&rxp->qe);
	list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
	rx_mod->rxp_free_count++;
}
static struct bna_rx *
bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct list_head *qe = NULL;
	struct bna_rx *rx = NULL;

	if (type == BNA_RX_T_REGULAR)
		bfa_q_deq(&rx_mod->rx_free_q, &qe);
	else
		bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);

	rx_mod->rx_free_count--;
	rx = (struct bna_rx *)qe;
	bfa_q_qe_init(&rx->qe);
	list_add_tail(&rx->qe, &rx_mod->rx_active_q);

	return rx;
}

static void
bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
	struct list_head *prev_qe = NULL;
	struct list_head *qe;

	bfa_q_qe_init(&rx->qe);

	list_for_each(qe, &rx_mod->rx_free_q) {
		if (((struct bna_rx *)qe)->rid < rx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
	} else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
		/* This is the last entry */
		list_add_tail(&rx->qe, &rx_mod->rx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&rx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &rx->qe;
		bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
	}

	rx_mod->rx_free_count++;
}
static void
bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
		struct bna_rxq *q1)
{
	switch (rxp->type) {
	case BNA_RXP_SINGLE:
		rxp->rxq.single.only = q0;
		rxp->rxq.single.reserved = NULL;
		break;
	case BNA_RXP_SLR:
		rxp->rxq.slr.large = q0;
		rxp->rxq.slr.small = q1;
		break;
	case BNA_RXP_HDS:
		rxp->rxq.hds.data = q0;
		rxp->rxq.hds.hdr = q1;
		break;
	}
}
static void
bna_rxq_qpt_setup(struct bna_rxq *rxq,
		struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
	rxq->qpt.page_count = page_count;
	rxq->qpt.page_size = page_size;

	rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
	rxq->rcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxq->qpt.page_count; i++) {
		rxq->rcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}

static void
bna_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
	rxp->cq.ccb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->enet);
	rx_mod->stop_cbfn = NULL;
}

static void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_ENET_STARTED;
	if (rx->rx_flags & BNA_RX_F_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}
static void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

static void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate Enet is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}

void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;

	bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type) {
			bfa_wc_up(&rx_mod->rx_stop_wc);
			bna_rx_stop(rx);
		}
	}

	bfa_wc_wait(&rx_mod->rx_stop_wc);
}

void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
			struct bna_res_info *res_info)
{
	int index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	rx_mod->rx_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	rx_mod->rxq_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	rx_mod->rxp_free_count = 0;
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	/* Build RX queues */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rx_ptr = &rx_mod->rx[index];

		bfa_q_qe_init(&rx_ptr->qe);
		INIT_LIST_HEAD(&rx_ptr->rxp_q);
		rx_ptr->rid = index;
		rx_ptr->stop_cbfn = NULL;
		rx_ptr->stop_cbarg = NULL;

		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
		rxq_ptr = &rx_mod->rxq[index];
		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &rx_mod->rx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxp_free_q)
		i++;

	i = 0;
	list_for_each(qe, &rx_mod->rxq_free_q)
		i++;
}
void
bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
	struct bna_rxp *rxp = NULL;
	struct bna_rxq *q0 = NULL, *q1 = NULL;
	struct list_head *rxp_qe;
	int i;

	bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_rx_cfg_rsp));

	rx->hw_id = cfg_rsp->hw_id;

	for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
		i < rx->num_paths;
		i++, rxp_qe = bfa_q_next(rxp_qe)) {
		rxp = (struct bna_rxp *)rxp_qe;
		GET_RXQS(rxp, q0, q1);

		/* Setup doorbells */
		rxp->cq.ccb->i_dbell->doorbell_addr =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
		q0->rcb->q_dbell =
			rx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].ql_dbell);
		q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
		if (q1) {
			q1->rcb->q_dbell =
				rx->bna->pcidev.pci_bar_kva
				+ ntohl(cfg_rsp->q_handles[i].qs_dbell);
			q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
		}

		/* Initialize producer/consumer indexes */
		(*rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->producer_index = 0;
		q0->rcb->producer_index = q0->rcb->consumer_index = 0;
		if (q1)
			q1->rcb->producer_index = q1->rcb->consumer_index = 0;
	}

	bfa_fsm_send_event(rx, RX_E_STARTED);
}

void
bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(rx, RX_E_STOPPED);
}
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	struct bna_mem_info *mem_info;
	u32 cq_depth;
	u32 hq_depth;
	u32 dq_depth;

	dq_depth = q_cfg->q0_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
	cq_depth = dq_depth + hq_depth;

	BNA_TO_POWER_OF_2_HIGH(cq_depth);
	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	BNA_TO_POWER_OF_2_HIGH(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		BNA_TO_POWER_OF_2_HIGH(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else
		hpage_count = 0;

	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * cpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * dpage_count;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * hpage_count;
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = q_cfg->num_paths;

	res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = BFI_ENET_RSS_RIT_MAX;
	mem_info->num = 1;

	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		const struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	struct bna_mem_descr *hqunmap_mem;
	struct bna_mem_descr *dqunmap_mem;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;
	struct bna_mem_descr *dqpt_mem;
	struct bna_mem_descr *hsqpt_mem;
	struct bna_mem_descr *dsqpt_mem;
	struct bna_mem_descr *hpage_mem;
	struct bna_mem_descr *dpage_mem;
	u32 dpage_count, hpage_count;
	u32 hq_idx, dq_idx, rcb_idx;
	u32 page_count, cq_depth, i;

	if (!bna_rx_res_check(rx_mod, rx_cfg))
		return NULL;

	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
	hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
			PAGE_SIZE;

	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
			PAGE_SIZE;

	rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
	rx->bna = bna;
	rx->rx_flags = 0;
	INIT_LIST_HEAD(&rx->rxp_q);
	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
	rx->priv = priv;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_ENET_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_ENET_STARTED;
			break;
		}
	}

	rx->num_paths = rx_cfg->num_paths;
	for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
	     i < rx->num_paths; i++) {
		rxp = bna_rxp_get(rx_mod);
		list_add_tail(&rxp->qe, &rx->rxp_q);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;
		rxp->cq.rx = rx;

		q0 = bna_rxq_get(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = bna_rxq_get(rx_mod);

		if (1 == intr_info->num)
			rxp->vector = intr_info->idl[0].vector;
		else
			rxp->vector = intr_info->idl[i].vector;

		/* Setup IB */

		rxp->cq.ib.ib_seg_host_addr.lsb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		rxp->cq.ib.ib_seg_host_addr.msb =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		rxp->cq.ib.ib_seg_host_addr_kva =
		res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		rxp->cq.ib.intr_type = intr_info->intr_type;
		if (intr_info->intr_type == BNA_INTR_T_MSIX)
			rxp->cq.ib.intr_vector = rxp->vector;
		else
			rxp->cq.ib.intr_vector = (1 << rxp->vector);
		rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
		rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
		rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;

		bna_rxp_add_rxqs(rxp, q0, q1);

		/* Setup large Q */

		q0->rx = rx;
		q0->rxp = rxp;

		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
		rcb_idx++; dq_idx++;
		q0->rcb->q_depth = rx_cfg->q0_depth;
		q0->q_depth = rx_cfg->q0_depth;
		q0->multi_buffer = rx_cfg->q0_multi_buf;
		q0->buffer_size = rx_cfg->q0_buf_size;
		q0->num_vecs = rx_cfg->q0_num_vecs;
		q0->rcb->rxq = q0;
		q0->rcb->bnad = bna->bnad;
		q0->rcb->id = 0;
		q0->rx_packets = q0->rx_bytes = 0;
		q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;

		bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);

		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		/* Setup small Q */

		if (q1) {
			q1->rx = rx;
			q1->rxp = rxp;

			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
			rcb_idx++; hq_idx++;
			q1->rcb->q_depth = rx_cfg->q1_depth;
			q1->q_depth = rx_cfg->q1_depth;
			q1->multi_buffer = BNA_STATUS_T_DISABLED;
			q1->num_vecs = 1;
			q1->rcb->rxq = q1;
			q1->rcb->bnad = bna->bnad;
			q1->rcb->id = 1;
			q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
					rx_cfg->hds_config.forced_offset
					: rx_cfg->q1_buf_size;
			q1->rx_packets = q1->rx_bytes = 0;
			q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;

			bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[i]);

			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup CQ */

		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		cq_depth = rx_cfg->q0_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q1_depth);
		/* if multi-buffer is enabled the sum of q0_depth
		 * and q1_depth need not be a power of 2
		 */
		BNA_TO_POWER_OF_2_HIGH(cq_depth);
		rxp->cq.ccb->q_depth = cq_depth;
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		q0->rcb->ccb = rxp->cq.ccb;
		if (q1) {
			rxp->cq.ccb->rcb[1] = q1->rcb;
			q1->rcb->ccb = rxp->cq.ccb;
		}
		rxp->cq.ccb->hw_producer_index =
			(u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
		rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
		rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
		rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib.coalescing_timeo;
		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->id = i;

		bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);

		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
	}

	rx->hds_cfg = rx_cfg->hds_config;

	bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	rx_mod->rid_mask |= (1 << rx->rid);

	return rx;
}
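/*
 * Teardown mirrors bna_rx_create(): queues are unlinked path by path,
 * bnad is given a chance to release its RCB/CCB state through the destroy
 * callbacks, and every object goes back to the rx_mod free queues.
 */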
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		q0->rcb = NULL;
		q0->rxp = NULL;
		q0->rx = NULL;
		bna_rxq_put(rx_mod, q0);

		if (q1) {
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			q1->rcb = NULL;
			q1->rxp = NULL;
			q1->rx = NULL;
			bna_rxq_put(rx_mod, q1);
		}

		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
		rxp->cq.ccb = NULL;
		rxp->rx = NULL;
		bna_rxp_put(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	rx_mod->rid_mask &= ~(1 << rx->rid);

	rx->bna = NULL;
	rx->priv = NULL;
	bna_rx_put(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat it as already stopped */
		(*cbfn)(rx->bna->bnad, rx);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLED;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

void
bna_rx_cleanup_complete(struct bna_rx *rx)
{
	bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
}
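/*
 * The VLAN-strip helpers below never touch the hardware directly; they
 * only flag the change as pending and kick the RXF state machine with
 * RXF_E_CONFIG, which applies it when the RXF reaches a configurable
 * state.
 */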
void
bna_rx_vlan_strip_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_strip_pending = true;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_vlan_strip_disable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
		rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
		rxf->vlan_strip_pending = true;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *))
{
	struct bna_rxf *rxf = &rx->rxf;
	int need_hw_config = 0;

	/* Error checks */

	if (is_promisc_enable(new_mode, bitmask)) {
		/* If promisc mode is already enabled elsewhere in the system */
		if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
			(rx->bna->promisc_rid != rxf->rx->rid))
			goto err_return;

		/* If default mode is already enabled in the system */
		if (rx->bna->default_mode_rid != BFI_INVALID_RID)
			goto err_return;

		/* Trying to enable promiscuous and default mode together */
		if (is_default_enable(new_mode, bitmask))
			goto err_return;
	}

	if (is_default_enable(new_mode, bitmask)) {
		/* If default mode is already enabled elsewhere in the system */
		if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
			(rx->bna->default_mode_rid != rxf->rx->rid)) {
			goto err_return;
		}

		/* If promiscuous mode is already enabled in the system */
		if (rx->bna->promisc_rid != BFI_INVALID_RID)
			goto err_return;
	}

	/* Process the commands */

	if (is_promisc_enable(new_mode, bitmask)) {
		if (bna_rxf_promisc_enable(rxf))
			need_hw_config = 1;
	} else if (is_promisc_disable(new_mode, bitmask)) {
		if (bna_rxf_promisc_disable(rxf))
			need_hw_config = 1;
	}

	if (is_allmulti_enable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_enable(rxf))
			need_hw_config = 1;
	} else if (is_allmulti_disable(new_mode, bitmask)) {
		if (bna_rxf_allmulti_disable(rxf))
			need_hw_config = 1;
	}

	/* Trigger h/w if needed */

	if (need_hw_config) {
		rxf->cam_fltr_cbfn = cbfn;
		rxf->cam_fltr_cbarg = rx->bna->bnad;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	} else if (cbfn)
		(*cbfn)(rx->bna->bnad, rx);

	return BNA_CB_SUCCESS;

err_return:
	return BNA_CB_FAIL;
}
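/*
 * Note: bna_rx_mode_set() above enforces that promiscuous and default
 * (VLAN-miss) modes each have at most one owner in the whole system,
 * tracked through bna->promisc_rid and bna->default_mode_rid.
 */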
void
bna_rx_vlanfilter_enable(struct bna_rx *rx)
{
	struct bna_rxf *rxf = &rx->rxf;

	if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
		rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
		rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
		bfa_fsm_send_event(rxf, RXF_E_CONFIG);
	}
}

void
bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
{
	struct bna_rxp *rxp;
	struct list_head *qe;

	list_for_each(qe, &rx->rxp_q) {
		rxp = (struct bna_rxp *)qe;
		rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
		bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
	}
}
void
bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
{
	int i, j;

	for (i = 0; i < BNA_LOAD_T_MAX; i++)
		for (j = 0; j < BNA_BIAS_T_MAX; j++)
			bna->rx_mod.dim_vector[i][j] = vector[i][j];
}

void
bna_rx_dim_update(struct bna_ccb *ccb)
{
	struct bna *bna = ccb->cq->rx->bna;
	u32 load, bias;
	u32 pkt_rt, small_rt, large_rt;
	u8 coalescing_timeo;

	if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
		(ccb->pkt_rate.large_pkt_cnt == 0))
		return;

	/* Arrive at preconfigured coalescing timeo value based on pkt rate */

	small_rt = ccb->pkt_rate.small_pkt_cnt;
	large_rt = ccb->pkt_rate.large_pkt_cnt;

	pkt_rt = small_rt + large_rt;

	if (pkt_rt < BNA_PKT_RATE_10K)
		load = BNA_LOAD_T_LOW_4;
	else if (pkt_rt < BNA_PKT_RATE_20K)
		load = BNA_LOAD_T_LOW_3;
	else if (pkt_rt < BNA_PKT_RATE_30K)
		load = BNA_LOAD_T_LOW_2;
	else if (pkt_rt < BNA_PKT_RATE_40K)
		load = BNA_LOAD_T_LOW_1;
	else if (pkt_rt < BNA_PKT_RATE_50K)
		load = BNA_LOAD_T_HIGH_1;
	else if (pkt_rt < BNA_PKT_RATE_60K)
		load = BNA_LOAD_T_HIGH_2;
	else if (pkt_rt < BNA_PKT_RATE_80K)
		load = BNA_LOAD_T_HIGH_3;
	else
		load = BNA_LOAD_T_HIGH_4;

	if (small_rt > (large_rt << 1))
		bias = 0;
	else
		bias = 1;

	ccb->pkt_rate.small_pkt_cnt = 0;
	ccb->pkt_rate.large_pkt_cnt = 0;

	coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
	ccb->rx_coalescing_timeo = coalescing_timeo;

	/* Set it to IB */
	bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
}

const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
	/* preconfigured {load x bias} -> coalescing-timeout entries
	 * (table body not included in this excerpt)
	 */
};
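/* TX */

/*
 * The two macros below invoke a stored one-shot callback: the callback
 * pointer is cleared before it is called, so a re-entrant stop or
 * priority change cannot trigger it twice.
 */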
#define call_tx_stop_cbfn(tx)						\
do {									\
	if ((tx)->stop_cbfn) {						\
		void (*cbfn)(void *, struct bna_tx *);			\
		void *cbarg;						\
		cbfn = (tx)->stop_cbfn;					\
		cbarg = (tx)->stop_cbarg;				\
		(tx)->stop_cbfn = NULL;					\
		(tx)->stop_cbarg = NULL;				\
		cbfn(cbarg, (tx));					\
	}								\
} while (0)

#define call_tx_prio_change_cbfn(tx)					\
do {									\
	if ((tx)->prio_change_cbfn) {					\
		void (*cbfn)(struct bnad *, struct bna_tx *);		\
		cbfn = (tx)->prio_change_cbfn;				\
		(tx)->prio_change_cbfn = NULL;				\
		cbfn((tx)->bna->bnad, (tx));				\
	}								\
} while (0)

static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
static void bna_bfi_tx_enet_start(struct bna_tx *tx);
static void bna_tx_enet_stop(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START			= 1,
	TX_E_STOP			= 2,
	TX_E_FAIL			= 3,
	TX_E_STARTED			= 4,
	TX_E_STOPPED			= 5,
	TX_E_PRIO_CHANGE		= 6,
	TX_E_CLEANUP_DONE		= 7,
	TX_E_BW_UPDATE			= 8,
};

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
			enum bna_tx_event);
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	call_tx_stop_cbfn(tx);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_STOP:
		call_tx_stop_cbfn(tx);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_start_wait_entry(struct bna_tx *tx)
{
	bna_bfi_tx_enet_start(tx);
}

static void
bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_STARTED:
		if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
			tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
				BNA_TX_F_BW_UPDATED);
			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		} else
			bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		tx->flags |= BNA_TX_F_PRIO_CHANGED;
		break;

	case TX_E_BW_UPDATE:
		tx->flags |= BNA_TX_F_BW_UPDATED;
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;
	int is_regular = (tx->type == BNA_TX_T_REGULAR);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		/* Start IB */
		bna_ib_start(tx->bna, &txq->ib, is_regular);
	}
	tx->tx_resume_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		bna_tx_enet_stop(tx);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		tx->tx_stall_cbfn(tx->bna->bnad, tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STARTED:
		/*
		 * We are here due to start_wait -> stop_wait transition on
		 * TX_E_STOP event
		 */
		bna_tx_enet_stop(tx);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
static void
bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_FAIL:
	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
	tx->tx_stall_cbfn(tx->bna->bnad, tx);
	bna_tx_enet_stop(tx);
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		call_tx_prio_change_cbfn(tx);
		tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
		break;

	case TX_E_STOPPED:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
{
	call_tx_prio_change_cbfn(tx);
	tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
}

static void
bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_PRIO_CHANGE:
	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_failed_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		/* No-op */
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
		break;

	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_failed);
		break;

	case TX_E_CLEANUP_DONE:
		bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
		break;

	case TX_E_BW_UPDATE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
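/*
 * Datapath Tx FSM summary: stopped -> start_wait -> started on
 * TX_E_START / TX_E_STARTED; stop and failure paths detour through
 * stop_wait / cleanup_wait (and the prio_* variants when a priority or
 * bandwidth update forces a restart) before returning to stopped.
 */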
static void
bna_bfi_tx_enet_start(struct bna_tx *tx)
{
	struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
	cfg_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));

	cfg_req->num_queues = tx->num_txq;
	for (i = 0, qe = bfa_q_first(&tx->txq_q);
	     i < tx->num_txq;
	     i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
		cfg_req->q_cfg[i].q.priority = txq->priority;

		cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
			txq->ib.ib_seg_host_addr.lsb;
		cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
			txq->ib.ib_seg_host_addr.msb;
		cfg_req->q_cfg[i].ib.intr.msix_index =
			htons((u16)txq->ib.intr_vector);
	}

	cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
	cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
				? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
	cfg_req->ib_cfg.coalescing_timeout =
			htonl((u32)txq->ib.coalescing_timeo);
	cfg_req->ib_cfg.inter_pkt_timeout =
			htonl((u32)txq->ib.interpkt_timeo);
	cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;

	cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
	cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
	cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
	cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

static void
bna_bfi_tx_enet_stop(struct bna_tx *tx)
{
	struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;

	bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
	req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
	bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
		&req->mh);
	bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
}

static void
bna_tx_enet_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Stop IB */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_stop(tx->bna, &txq->ib);
	}

	bna_bfi_tx_enet_stop(tx);
}
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	u8 *kva;
	u64 dma;
	struct bna_dma_addr bna_dma;
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
	txq->tcb->sw_q = page_mem->kva;

	kva = page_mem->kva;
	BNA_GET_DMA_ADDR(&page_mem->dma, dma);

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = kva;
		kva += PAGE_SIZE;

		BNA_SET_DMA_ADDR(dma, &bna_dma);
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			bna_dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			bna_dma.msb;
		dma += PAGE_SIZE;
	}
}
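/*
 * Note: bna_txq_qpt_setup() above builds two views of the same queue
 * memory: the hardware queue page table (DMA addresses of each page) and
 * the driver's shadow table tcb->sw_qpt (kernel virtual addresses), both
 * indexed by page number.
 */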
static struct bna_tx *
bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct list_head *qe = NULL;
	struct bna_tx *tx = NULL;

	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	if (type == BNA_TX_T_REGULAR)
		bfa_q_deq(&tx_mod->tx_free_q, &qe);
	else
		bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
	tx = (struct bna_tx *)qe;
	bfa_q_qe_init(&tx->qe);
	tx->type = type;

	return tx;
}

static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct list_head *prev_qe;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		txq->tcb = NULL;
		txq->tx = NULL;
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	tx->bna = NULL;
	tx->priv = NULL;

	prev_qe = NULL;
	list_for_each(qe, &tx_mod->tx_free_q) {
		if (((struct bna_tx *)qe)->rid < tx->rid)
			prev_qe = qe;
		else
			break;
	}

	if (prev_qe == NULL) {
		/* This is the first entry */
		bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
	} else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
		/* This is the last entry */
		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
	} else {
		/* Somewhere in the middle */
		bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
		bfa_q_prev(&tx->qe) = prev_qe;
		bfa_q_next(prev_qe) = &tx->qe;
		bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
	}
}
static void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_ENET_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

static void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

static void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_ENET_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}
void
bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
	struct bna_txq *txq = NULL;
	struct list_head *qe;
	int i;

	bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
		sizeof(struct bfi_enet_tx_cfg_rsp));

	tx->hw_id = cfg_rsp->hw_id;

	for (i = 0, qe = bfa_q_first(&tx->txq_q);
	     i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
		txq = (struct bna_txq *)qe;

		/* Setup doorbells */
		txq->tcb->i_dbell->doorbell_addr =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].i_dbell);
		txq->tcb->q_dbell =
			tx->bna->pcidev.pci_bar_kva
			+ ntohl(cfg_rsp->q_handles[i].q_dbell);
		txq->hw_id = cfg_rsp->q_handles[i].hw_qid;

		/* Initialize producer/consumer indexes */
		(*txq->tcb->hw_consumer_index) = 0;
		txq->tcb->producer_index = txq->tcb->consumer_index = 0;
	}

	bfa_fsm_send_event(tx, TX_E_STARTED);
}

void
bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(tx, TX_E_STOPPED);
}

void
bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
	}
}
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE * page_count;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = BFI_IBIDX_SIZE;
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
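/*
 * bna_tx_create() consumes the resources described by bna_tx_res_req():
 * it takes a bna_tx and num_txq bna_txq objects off the tx_mod free
 * lists, programs each TxQ's interrupt block, TCB and queue page table,
 * and leaves the object in the stopped FSM state.
 */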
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		const struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	int page_count;
	int i;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
					PAGE_SIZE;

	/*
	 * Get resources
	 */

	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */

	tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
	if (!tx)
		return NULL;
	tx->bna = bna;
	tx->priv = priv;

	/* TxQs */

	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->tx = tx;
	}

	/*
	 * Initialize
	 */

	/* Tx */

	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->num_txq = tx_cfg->num_txq;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_ENET_LOOPBACK))
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
				tx->flags |= BNA_TX_F_ENET_STARTED;
			break;
		}
	}

	/* TxQ */

	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb = (struct bna_tcb *)
		res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;
		txq->tx_bytes = 0;

		/* IB */
		txq->ib.ib_seg_host_addr.lsb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		txq->ib.ib_seg_host_addr.msb =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
		txq->ib.ib_seg_host_addr_kva =
		res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		txq->ib.intr_type = intr_info->intr_type;
		txq->ib.intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		if (intr_info->intr_type == BNA_INTR_T_INTX)
			txq->ib.intr_vector = (1 << txq->ib.intr_vector);
		txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
		txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
		txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;

		/* TCB */

		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
		res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		txq->tcb->hw_consumer_index =
			(u32 *)txq->ib.ib_seg_host_addr_kva;
		txq->tcb->i_dbell = &txq->ib.door_bell;
		txq->tcb->intr_type = txq->ib.intr_type;
		txq->tcb->intr_vector = txq->ib.intr_vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;
		txq->tcb->id = i;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				  res_u.mem_info.mdl[i]);

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
			txq->priority = txq->tcb->id;
		else
			txq->priority = tx_mod->default_prio;

		i++;
	}

	tx->txf_vlan_id = 0;

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	tx_mod->rid_mask |= (1 << tx->rid);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
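/*
 * Note: in bna_tx_create() above, when one TxQ is created per supported
 * priority (num_txq == BFI_TX_MAX_PRIO) each queue is assigned its own
 * priority; otherwise every queue uses tx_mod->default_prio.
 */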
void
bna_tx_destroy(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		if (tx->tcb_destroy_cbfn)
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
	}

	tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
	bna_tx_free(tx);
}

void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_ENET_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}

void
bna_tx_cleanup_complete(struct bna_tx *tx)
{
	bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
}
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->enet);
	tx_mod->stop_cbfn = NULL;
}
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;
	tx_mod->flags = 0;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
		tx_mod->tx[i].rid = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
	tx_mod->default_prio = 0;
	tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
	tx_mod->iscsi_prio = -1;
}

void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;
	int i;

	i = 0;
	list_for_each(qe, &tx_mod->tx_free_q)
		i++;

	i = 0;
	list_for_each(qe, &tx_mod->txq_free_q)
		i++;

	tx_mod->bna = NULL;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}

void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;

	bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type) {
			bfa_wc_up(&tx_mod->tx_stop_wc);
			bna_tx_stop(tx);
		}
	}

	bfa_wc_wait(&tx_mod->tx_stop_wc);
}

void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}
void
bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);