 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
#define bna_ib_find_free_ibidx(_mask, _pos)\
        while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
                ((1 << (_pos)) & (_mask)))\

#define bna_ib_count_ibidx(_mask, _count)\
        while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
                if ((1 << pos) & (_mask))\

#define bna_ib_select_segpool(_count, _q_idx)\
        for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
                if ((_count <= ibidx_pool[i].pool_entry_size)) {\

struct bna_ibidx_pool {

init_ibidx_pool(ibidx_pool);
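/*
 * Interrupt object lookup. bna_intr_get() first walks intr_active_q for an
 * object that already matches the requested (intr_type, vector) pair and
 * reuses it; only when no match exists does it dequeue a fresh object from
 * intr_free_q, fill in the type/vector, and move it to the active queue.
 */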
static struct bna_intr *
bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
        struct bna_intr *intr;

        list_for_each(qe, &ib_mod->intr_active_q) {
                intr = (struct bna_intr *)qe;
                if ((intr->intr_type == intr_type) &&
                        (intr->vector == vector)) {

        if (list_empty(&ib_mod->intr_free_q))

        bfa_q_deq(&ib_mod->intr_free_q, &intr);
        bfa_q_qe_init(&intr->qe);

        intr->intr_type = intr_type;
        intr->vector = vector;

        list_add_tail(&intr->qe, &ib_mod->intr_active_q);

bna_intr_put(struct bna_ib_mod *ib_mod,
                struct bna_intr *intr)

        if (intr->ref_count == 0) {
                bfa_q_qe_init(&intr->qe);
                list_add_tail(&intr->qe, &ib_mod->intr_free_q);
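/*
 * IB module setup: the IB, interrupt and index-segment arrays are carved out
 * of memory pre-allocated through res_info[]; each IB records the DMA address
 * of its host segment plus a doorbell address computed from the doorbell qset
 * layout, and the index segments are distributed across the per-size pools
 * described by ibidx_pool[].
 */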
bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
                struct bna_res_info *res_info)

        struct bna_doorbell_qset *qset;

        ib_mod->ib = (struct bna_ib *)
                res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
        ib_mod->intr = (struct bna_intr *)
                res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
        ib_mod->idx_seg = (struct bna_ibidx_seg *)
                res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;

        INIT_LIST_HEAD(&ib_mod->ib_free_q);
        INIT_LIST_HEAD(&ib_mod->intr_free_q);
        INIT_LIST_HEAD(&ib_mod->intr_active_q);

        for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
                INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);

        for (i = 0; i < BFI_MAX_IB; i++) {
                ib_mod->ib[i].ib_id = i;

                ib_mod->ib[i].ib_seg_host_addr_kva =
                        res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
                ib_mod->ib[i].ib_seg_host_addr.lsb =
                        res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
                ib_mod->ib[i].ib_seg_host_addr.msb =
                        res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;

                qset = (struct bna_doorbell_qset *)0;
                off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
                ib_mod->ib[i].door_bell.doorbell_addr = off +
                        BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);

                bfa_q_qe_init(&ib_mod->ib[i].qe);
                list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);

                bfa_q_qe_init(&ib_mod->intr[i].qe);
                list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);

        for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
                for (j = 0; j < ibidx_pool[i].pool_size; j++) {
                        bfa_q_qe_init(&ib_mod->idx_seg[count]);
                        ib_mod->idx_seg[count].ib_seg_size =
                                ibidx_pool[i].pool_entry_size;
                        ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
                        list_add_tail(&ib_mod->idx_seg[count].qe,
                                &ib_mod->ibidx_seg_pool[i]);

                        offset += ibidx_pool[i].pool_entry_size;

bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)

        struct list_head *qe;

        list_for_each(qe, &ib_mod->ib_free_q)

        list_for_each(qe, &ib_mod->intr_free_q)

        for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
                list_for_each(qe, &ib_mod->ibidx_seg_pool[i])

static struct bna_ib *
bna_ib_get(struct bna_ib_mod *ib_mod,
                enum bna_intr_type intr_type,

        struct bna_intr *intr;

        if (intr_type == BNA_INTR_T_INTX)
                vector = (1 << vector);

        intr = bna_intr_get(ib_mod, intr_type, vector);

        if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
                bna_intr_put(ib_mod, intr);

        intr->ib->ref_count++;

        if (list_empty(&ib_mod->ib_free_q)) {
                bna_intr_put(ib_mod, intr);

        bfa_q_deq(&ib_mod->ib_free_q, &ib);
        bfa_q_qe_init(&ib->qe);

        ib->bna = ib_mod->bna;

bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)

        bna_intr_put(ib_mod, ib->intr);

        if (ib->ref_count == 0) {
                list_add_tail(&ib->qe, &ib_mod->ib_free_q);
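/*
 * Index reservation below works on ib->idx_mask: the first clear bit gives
 * the new index, the resulting count of used indexes decides whether the
 * current index segment still fits, and if not a larger segment is taken
 * from the matching pool while the old one is returned to its pool.
 */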
/* Returns index offset - starting from 0 */
bna_ib_reserve_idx(struct bna_ib *ib)

        struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
        struct bna_ibidx_seg *idx_seg;

        /* Find the first free index position */
        bna_ib_find_free_ibidx(ib->idx_mask, idx);
        if (idx == BFI_IBIDX_MAX_SEGSIZE)

        /*
         * Calculate the total number of indexes held by this IB,
         * including the index newly reserved above.
         */
        bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);

        /* See if there is a free space in the index segment held by this IB */
        if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
                ib->idx_mask |= (1 << idx);

        /* Allocate a new segment */
        bna_ib_select_segpool(num_idx, q_idx);
        if (q_idx == BFI_IBIDX_TOTAL_POOLS)
        if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))

        bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
        bfa_q_qe_init(&idx_seg->qe);

        /* Free the old segment */
        bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
        list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);

        ib->idx_seg = idx_seg;

        ib->idx_mask |= (1 << idx);

bna_ib_release_idx(struct bna_ib *ib, int idx)

        struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
        struct bna_ibidx_seg *idx_seg;

        ib->idx_mask &= ~(1 << idx);

        bna_ib_count_ibidx(ib->idx_mask, num_idx);

        /*
         * Free the segment, if there are no more indexes in the segment
         */
        bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
        list_add_tail(&ib->idx_seg->qe,
                &ib_mod->ibidx_seg_pool[cur_q_idx]);

        /* See if we can move to a smaller segment */
        bna_ib_select_segpool(num_idx, new_q_idx);
        bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
        while (new_q_idx < cur_q_idx) {
                if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))

        if (new_q_idx < cur_q_idx) {
                /* Select the new smaller segment */
                bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
                bfa_q_qe_init(&idx_seg->qe);
                /* Free the old segment */
                list_add_tail(&ib->idx_seg->qe,
                        &ib_mod->ibidx_seg_pool[cur_q_idx]);
                ib->idx_seg = idx_seg;

bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)

        ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
        ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
        ib->ib_config.interpkt_count = ib_config->interpkt_count;
        ib->ib_config.ctrl_flags = ib_config->ctrl_flags;

        ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
        if (ib->intr->intr_type == BNA_INTR_T_MSIX)
                ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
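/*
 * bna_ib_start() programs the IB block for this ib_id: the host segment
 * address and the packed coalescing/control/inter-packet words are written
 * through the HQM page-number window into IB RAM, the index table entries
 * owned by this IB are zeroed, and for INTx mode the vector is unmasked.
 */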
bna_ib_start(struct bna_ib *ib)

        struct bna_ib_blk_mem ib_cfg;
        struct bna_ib_blk_mem *ib_mem;
        void __iomem *base_addr;

        if (ib->start_count > 1)

        ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
        ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);

        ib_cfg.clsc_n_ctrl_n_msix = (((u32)
                        ib->ib_config.coalescing_timeo << 16) |
                ((u32)ib->ib_config.ctrl_flags << 8) |
        ib_cfg.ipkt_n_ent_n_idxof =
                (ib->ib_config.interpkt_timeo & 0xf) << 16) |
                ((u32)ib->idx_seg->ib_seg_size << 8) |
                (ib->idx_seg->ib_idx_tbl_offset);
        ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
                ib->ib_config.interpkt_count << 24);

        pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
                        HQM_IB_RAM_BASE_OFFSET);
        writel(pg_num, ib->bna->regs.page_addr);

        base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
                        HQM_IB_RAM_BASE_OFFSET);

        ib_mem = (struct bna_ib_blk_mem *)0;
        off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
        writel(htonl(ib_cfg.host_addr_lo), base_addr + off);

        off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
        writel(htonl(ib_cfg.host_addr_hi), base_addr + off);

        off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
        writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);

        off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
        writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);

        off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
        writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);

        ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
                (u32)ib->ib_config.coalescing_timeo, 0);

        pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
                        HQM_INDX_TBL_RAM_BASE_OFFSET);
        writel(pg_num, ib->bna->regs.page_addr);

        base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
                        HQM_INDX_TBL_RAM_BASE_OFFSET);
        for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
                off = (unsigned long)
                        ((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
                writel(0, base_addr + off);

        if (ib->intr->intr_type == BNA_INTR_T_INTX) {
                bna_intx_disable(ib->bna, intx_mask);
                intx_mask &= ~(ib->intr->vector);
                bna_intx_enable(ib->bna, intx_mask);

bna_ib_stop(struct bna_ib *ib)

        if (ib->start_count == 0) {
                writel(BNA_DOORBELL_IB_INT_DISABLE,
                        ib->door_bell.doorbell_addr);
                if (ib->intr->intr_type == BNA_INTR_T_INTX) {
                        bna_intx_disable(ib->bna, intx_mask);
                        intx_mask |= (ib->intr->vector);
                        bna_intx_enable(ib->bna, intx_mask);

bna_ib_fail(struct bna_ib *ib)

static void rxf_enable(struct bna_rxf *rxf);
static void rxf_disable(struct bna_rxf *rxf);
static void __rxf_config_set(struct bna_rxf *rxf);
static void __rxf_rit_set(struct bna_rxf *rxf);
static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
static int rxf_process_packet_filter(struct bna_rxf *rxf);
static int rxf_clear_packet_filter(struct bna_rxf *rxf);
static void rxf_reset_packet_filter(struct bna_rxf *rxf);
static void rxf_cb_enabled(void *arg, int status);
static void rxf_cb_disabled(void *arg, int status);
static void bna_rxf_cb_stats_cleared(void *arg, int status);
static void __rxf_enable(struct bna_rxf *rxf);
static void __rxf_disable(struct bna_rxf *rxf);

bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,

static struct bfa_sm_table rxf_sm_table[] = {
        {BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
        {BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
        {BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
        {BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
        {BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
        {BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
        {BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
        {BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
        {BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
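/*
 * RxF state machine: start/stop, CAM filter updates, pause/resume and stats
 * clearing are serialized through the states declared above; mailbox
 * completions re-enter the FSM as RXF_E_CAM_FLTR_RESP, RXF_E_STARTED,
 * RXF_E_STOPPED or RXF_E_STAT_CLEARED events.
 */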
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)

        call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);

bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)

                bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);

                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

        case RXF_E_CAM_FLTR_MOD:
                call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);

        case RXF_E_CAM_FLTR_RESP:
                /*
                 * These events are received due to flushing of mbox
                 */

                rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
                call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);

                rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
                call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);

                bfa_sm_fault(rxf->rx->bna, event);

bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)

        __rxf_config_set(rxf);

bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)

                /*
                 * STOP is originated from bnad. When this happens,
                 * it can not be waiting for filter update
                 */
                call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);

                call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
                call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

        case RXF_E_CAM_FLTR_MOD:
                /*
                 * Force rxf_process_filter() to go through initial
                 */
                if ((rxf->ucast_active_mac != NULL) &&
                        (rxf->ucast_pending_set == 0))
                        rxf->ucast_pending_set = 1;

                if (rxf->rss_status == BNA_STATUS_T_ENABLED)
                        rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;

                rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;

                bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);

                rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;

                bfa_sm_fault(rxf->rx->bna, event);

bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)

        if (!rxf_process_packet_filter(rxf)) {
                /* No more pending CAM entries to update */
                bfa_fsm_set_state(rxf, bna_rxf_sm_started);

bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)

                /*
                 * STOP is originated from bnad. When this happens,
                 * it can not be waiting for filter update
                 */
                call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
                bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);

                rxf_reset_packet_filter(rxf);
                call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
                call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

        case RXF_E_CAM_FLTR_MOD:

        case RXF_E_CAM_FLTR_RESP:
                if (!rxf_process_packet_filter(rxf)) {
                        /* No more pending CAM entries to update */
                        call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
                        bfa_fsm_set_state(rxf, bna_rxf_sm_started);

                rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;

                bfa_sm_fault(rxf->rx->bna, event);

bna_rxf_sm_started_entry(struct bna_rxf *rxf)

        call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);

        if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
                if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
                        bfa_fsm_send_event(rxf, RXF_E_PAUSE);
                bfa_fsm_send_event(rxf, RXF_E_RESUME);

bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)

                bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
                /* Hack to get FSM start clearing CAM entries */
                bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);

                rxf_reset_packet_filter(rxf);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

        case RXF_E_CAM_FLTR_MOD:
                bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);

                bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);

                bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);

                bfa_sm_fault(rxf->rx->bna, event);

bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)

        /*
         * Note: Do not add rxf_clear_packet_filter here.
         * It will overstep mbox when this transition happens:
         * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
         */

bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)

                /*
                 * FSM was in the process of stopping, initiated by
                 * bnad. When this happens, no one can be waiting for
                 * start or filter update
                 */
                rxf_reset_packet_filter(rxf);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

        case RXF_E_CAM_FLTR_RESP:
                if (!rxf_clear_packet_filter(rxf)) {
                        /* No more pending CAM entries to clear */
                        bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);

                bfa_sm_fault(rxf->rx->bna, event);

bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)

        /*
         * NOTE: Do not add rxf_disable here.
         * It will overstep mbox when this transition happens:
         * start_wait -> stop_wait on RXF_E_STOP event
         */

bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)

                /*
                 * FSM was in the process of stopping, initiated by
                 * bnad. When this happens, no one can be waiting for
                 * start or filter update
                 */
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

                /*
                 * This event is received due to abrupt transition from
                 * bna_rxf_sm_start_wait state on receiving
                 */

                /*
                 * FSM was in the process of stopping, initiated by
                 * bnad. When this happens, no one can be waiting for
                 * start or filter update
                 */
                bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);

                rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;

                rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;

                bfa_sm_fault(rxf->rx->bna, event);

bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)

                ~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);

bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)

                /*
                 * FSM was in the process of disabling rxf, initiated by
                 */
                call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

                rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
                call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
                bfa_fsm_set_state(rxf, bna_rxf_sm_started);

        /*
         * Since PAUSE/RESUME can only be sent by bnad, we don't expect
         * any other event during these states
         */
                bfa_sm_fault(rxf->rx->bna, event);

bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)

        rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
        rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;

bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)

                /*
                 * FSM was in the process of disabling rxf, initiated by
                 */
                call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

                rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
                call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
                bfa_fsm_set_state(rxf, bna_rxf_sm_started);

        /*
         * Since PAUSE/RESUME can only be sent by bnad, we don't expect
         * any other event during these states
         */
                bfa_sm_fault(rxf->rx->bna, event);

bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)

        __bna_rxf_stat_clr(rxf);

bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)

        case RXF_E_STAT_CLEARED:
                bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

                bfa_sm_fault(rxf->rx->bna, event);
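/*
 * __rxf_enable()/__rxf_disable() send a BFI_LL_H2I_RX_REQ mailbox command
 * carrying a 64-bit rxf id bitmask split across two 32-bit words: word 0 for
 * rxf_id < 32, word 1 otherwise. The completion callbacks feed RXF_E_STARTED
 * or RXF_E_STOPPED back into the state machine.
 */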
__rxf_enable(struct bna_rxf *rxf)

        struct bfi_ll_rxf_multi_req ll_req;

        if (rxf->rxf_id < 32)
                bm[0] = 1 << rxf->rxf_id;
        bm[1] = 1 << (rxf->rxf_id - 32);

        bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
        ll_req.rxf_id_mask[0] = htonl(bm[0]);
        ll_req.rxf_id_mask[1] = htonl(bm[1]);

        bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
                        rxf_cb_enabled, rxf);

        bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);

__rxf_disable(struct bna_rxf *rxf)

        struct bfi_ll_rxf_multi_req ll_req;

        if (rxf->rxf_id < 32)
                bm[0] = 1 << rxf->rxf_id;
        bm[1] = 1 << (rxf->rxf_id - 32);

        bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
        ll_req.rxf_id_mask[0] = htonl(bm[0]);
        ll_req.rxf_id_mask[1] = htonl(bm[1]);

        bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
                        rxf_cb_disabled, rxf);

        bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
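/*
 * __rxf_config_set() writes the per-function receive configuration: the RSS
 * table (hash key and type, with RSS briefly disabled while the key is
 * loaded) and the RX function database entry (RSS enable, buffer size
 * routing, RIT offset / HDS / mcast RxQ, VLAN and HDS control flags), all
 * accessed through the page-number window plus an offset into the BAR.
 */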
__rxf_config_set(struct bna_rxf *rxf)

        struct bna_rss_mem *rss_mem;
        struct bna_rx_fndb_ram *rx_fndb_ram;
        struct bna *bna = rxf->rx->bna;
        void __iomem *base_addr;

        base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
                        RSS_TABLE_BASE_OFFSET);

        rss_mem = (struct bna_rss_mem *)0;

        /* Configure RSS if required */
        if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
                /* configure RSS Table */
                writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
                        bna->port_num, RSS_TABLE_BASE_OFFSET),
                        bna->regs.page_addr);

                /* temporarily disable RSS, while hash value is written */
                off = (unsigned long)&rss_mem[0].type_n_hash;
                writel(0, base_addr + off);

                for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
                        off = (unsigned long)
                        &rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
                        writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),

                off = (unsigned long)&rss_mem[0].type_n_hash;
                writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,

        writel(BNA_GET_PAGE_NUM(
                LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
                RX_FNDB_RAM_BASE_OFFSET),
                bna->regs.page_addr);

        base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
                        RX_FNDB_RAM_BASE_OFFSET);

        rx_fndb_ram = (struct bna_rx_fndb_ram *)0;

        /* We always use RSS table 0 */
        off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
        writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,

        /* small large buffer enable/disable */
        off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
        writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,

        /* RIT offset, HDS forced offset, multicast RxQ Id */
        off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
        writel((rxf->rit_segment->rit_offset << 16) |
                (rxf->forced_offset << 8) |
                (rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,

        /*
         * default vlan tag, default function enable, strip vlan bytes,
         * HDS type, header size
         */
        off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
        writel(((u32)rxf->default_vlan_tag << 16) |
                (BNA_RXF_CF_DEFAULT_VLAN |
                BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
                BNA_RXF_CF_VLAN_STRIP)) |
                (rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
                rxf->hds_cfg.header_size,

__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)

        struct bna *bna = rxf->rx->bna;

        writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
                (bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
                bna->regs.page_addr);

        if (status == BNA_STATUS_T_ENABLED) {
                /* enable VLAN filtering on this function */
                for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
                        writel(rxf->vlan_filter_table[i],
                                BNA_GET_VLAN_MEM_ENTRY_ADDR
                                (bna->pcidev.pci_bar_kva, rxf->rxf_id,

                /* disable VLAN filtering on this function */
                for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
                                BNA_GET_VLAN_MEM_ENTRY_ADDR
                                (bna->pcidev.pci_bar_kva, rxf->rxf_id,

__rxf_rit_set(struct bna_rxf *rxf)

        struct bna *bna = rxf->rx->bna;
        struct bna_rit_mem *rit_mem;
        void __iomem *base_addr;

        base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
                        FUNCTION_TO_RXQ_TRANSLATE);

        rit_mem = (struct bna_rit_mem *)0;

        writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
                FUNCTION_TO_RXQ_TRANSLATE),
                bna->regs.page_addr);

        for (i = 0; i < rxf->rit_segment->rit_size; i++) {
                off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
                writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
                        rxf->rit_segment->rit[i].small_rxq_id,

__bna_rxf_stat_clr(struct bna_rxf *rxf)

        struct bfi_ll_stats_req ll_req;

        if (rxf->rxf_id < 32)
                bm[0] = 1 << rxf->rxf_id;
        bm[1] = 1 << (rxf->rxf_id - 32);

        bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
        ll_req.stats_mask = 0;
        ll_req.txf_id_mask[0] = 0;
        ll_req.txf_id_mask[1] = 0;

        ll_req.rxf_id_mask[0] = htonl(bm[0]);
        ll_req.rxf_id_mask[1] = htonl(bm[1]);

        bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
                        bna_rxf_cb_stats_cleared, rxf);
        bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);

rxf_enable(struct bna_rxf *rxf)

        if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
                bfa_fsm_send_event(rxf, RXF_E_STARTED);

        rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;

rxf_cb_enabled(void *arg, int status)

        struct bna_rxf *rxf = (struct bna_rxf *)arg;

        bfa_q_qe_init(&rxf->mbox_qe.qe);
        bfa_fsm_send_event(rxf, RXF_E_STARTED);

rxf_disable(struct bna_rxf *rxf)

        if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
                bfa_fsm_send_event(rxf, RXF_E_STOPPED);

        rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;

rxf_cb_disabled(void *arg, int status)

        struct bna_rxf *rxf = (struct bna_rxf *)arg;

        bfa_q_qe_init(&rxf->mbox_qe.qe);
        bfa_fsm_send_event(rxf, RXF_E_STOPPED);

rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)

        struct bna_rxf *rxf = (struct bna_rxf *)arg;

        bfa_q_qe_init(&rxf->mbox_qe.qe);

        bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);

bna_rxf_cb_stats_cleared(void *arg, int status)

        struct bna_rxf *rxf = (struct bna_rxf *)arg;

        bfa_q_qe_init(&rxf->mbox_qe.qe);
        bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
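/*
 * CAM updates are issued one mailbox command at a time via
 * rxf_cam_mbox_cmd(); rxf_cb_cam_fltr_mbox_cmd() turns each completion into
 * an RXF_E_CAM_FLTR_RESP event, so the rxf_process_* and rxf_clear_* helpers
 * below are re-entered once per response until no work is pending.
 */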
rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
                const struct bna_mac *mac_addr)

        struct bfi_ll_mac_addr_req req;

        bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

        req.rxf_id = rxf->rxf_id;
        memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);

        bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
                        rxf_cb_cam_fltr_mbox_cmd, rxf);

        bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);

rxf_process_packet_filter_mcast(struct bna_rxf *rxf)

        struct bna_mac *mac = NULL;
        struct list_head *qe;

        /* Add multicast entries */
        if (!list_empty(&rxf->mcast_pending_add_q)) {
                bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
                mac = (struct bna_mac *)qe;
                rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
                list_add_tail(&mac->qe, &rxf->mcast_active_q);

        /* Delete multicast entries previously added */
        if (!list_empty(&rxf->mcast_pending_del_q)) {
                bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
                mac = (struct bna_mac *)qe;
                rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
rxf_process_packet_filter_vlan(struct bna_rxf *rxf)

        /* Apply the VLAN filter */
        if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
                rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
                if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
                        __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);

        /* Apply RSS configuration */
        if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
                rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
                if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
                        /* RSS is being disabled */
                        rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
                        __rxf_config_set(rxf);
                        /* RSS is being enabled or reconfigured */
                        rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
                        __rxf_config_set(rxf);

/*
 * Processes pending ucast, mcast entry addition/deletion and issues mailbox
 * command. Also processes pending filter configuration - promiscuous mode,
 * default mode, allmulti mode and issues mailbox command or directly applies
 */
rxf_process_packet_filter(struct bna_rxf *rxf)

        /* Set the default MAC first */
        if (rxf->ucast_pending_set > 0) {
                rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
                        rxf->ucast_active_mac);
                rxf->ucast_pending_set--;

        if (rxf_process_packet_filter_ucast(rxf))

        if (rxf_process_packet_filter_mcast(rxf))

        if (rxf_process_packet_filter_promisc(rxf))

        if (rxf_process_packet_filter_allmulti(rxf))

        if (rxf_process_packet_filter_vlan(rxf))
rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)

        struct bna_mac *mac = NULL;
        struct list_head *qe;

        /* 3. delete pending mcast entries */
        if (!list_empty(&rxf->mcast_pending_del_q)) {
                bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
                mac = (struct bna_mac *)qe;
                rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);

        /* 4. clear active mcast entries; move them to pending_add_q */
        if (!list_empty(&rxf->mcast_active_q)) {
                bfa_q_deq(&rxf->mcast_active_q, &qe);
                mac = (struct bna_mac *)qe;
                rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
                list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

/*
 * In the rxf stop path, processes pending ucast/mcast delete queue and issues
 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
 * so that they are added to CAM again in the rxf start path. Moves the current
 * filter settings - promiscuous, default, allmulti - to pending filter
 */
rxf_clear_packet_filter(struct bna_rxf *rxf)

        if (rxf_clear_packet_filter_ucast(rxf))

        if (rxf_clear_packet_filter_mcast(rxf))

        /* 5. clear active default MAC in the CAM */
        if (rxf->ucast_pending_set > 0)
                rxf->ucast_pending_set = 0;

        if (rxf_clear_packet_filter_promisc(rxf))

        if (rxf_clear_packet_filter_allmulti(rxf))

rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)

        struct list_head *qe;
        struct bna_mac *mac;

        /* 3. Move active mcast entries to pending_add_q */
        while (!list_empty(&rxf->mcast_active_q)) {
                bfa_q_deq(&rxf->mcast_active_q, &qe);
                list_add_tail(qe, &rxf->mcast_pending_add_q);

        /* 4. Throw away delete pending mcast entries */
        while (!list_empty(&rxf->mcast_pending_del_q)) {
                bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
                mac = (struct bna_mac *)qe;
                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);

/*
 * In the rxf fail path, throws away the ucast/mcast entries pending for
 * deletion, moves all active ucast/mcast entries to pending queue so that
 * they are added back to CAM in the rxf start path. Also moves the current
 * filter configuration to pending filter configuration.
 */
rxf_reset_packet_filter(struct bna_rxf *rxf)

        rxf_reset_packet_filter_ucast(rxf);

        rxf_reset_packet_filter_mcast(rxf);

        /* 5. Turn off ucast set flag */
        rxf->ucast_pending_set = 0;

        rxf_reset_packet_filter_promisc(rxf);

        rxf_reset_packet_filter_allmulti(rxf);
bna_rxf_init(struct bna_rxf *rxf,
                struct bna_rx_config *q_config)

        struct list_head *qe;
        struct bna_rxp *rxp;

        /* rxf_id is initialized during rx_mod init */

        INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
        INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
        rxf->ucast_pending_set = 0;
        INIT_LIST_HEAD(&rxf->ucast_active_q);
        rxf->ucast_active_mac = NULL;

        INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
        INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
        INIT_LIST_HEAD(&rxf->mcast_active_q);

        bfa_q_qe_init(&rxf->mbox_qe.qe);

        if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
                rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;

        rxf->rxf_oper_state = (q_config->paused) ?
                BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;

        bna_rxf_adv_init(rxf, rx, q_config);

        rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
                        q_config->num_paths);

        list_for_each(qe, &rx->rxp_q) {
                rxp = (struct bna_rxp *)qe;
                if (q_config->rxp_type == BNA_RXP_SINGLE)
                        rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
                rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;

        rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
        memset(rxf->vlan_filter_table, 0,
                (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));

        /* Set up VLAN 0 for pure priority tagged packets */
        rxf->vlan_filter_table[0] |= 1;

        bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);

bna_rxf_uninit(struct bna_rxf *rxf)

        struct bna *bna = rxf->rx->bna;
        struct bna_mac *mac;

        bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
        rxf->rit_segment = NULL;

        rxf->ucast_pending_set = 0;

        while (!list_empty(&rxf->ucast_pending_add_q)) {
                bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
                bfa_q_qe_init(&mac->qe);
                bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);

        if (rxf->ucast_active_mac) {
                bfa_q_qe_init(&rxf->ucast_active_mac->qe);
                bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
                        rxf->ucast_active_mac);
                rxf->ucast_active_mac = NULL;

        while (!list_empty(&rxf->mcast_pending_add_q)) {
                bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
                bfa_q_qe_init(&mac->qe);
                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);

        /* Turn off pending promisc mode */
        if (is_promisc_enable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask)) {
                /* system promisc state should be pending */
                BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
                promisc_inactive(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);
                bna->rxf_promisc_id = BFI_MAX_RXF;

        /* Promisc mode should not be active */
        BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);

        /* Turn off pending all-multi mode */
        if (is_allmulti_enable(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask)) {
                allmulti_inactive(rxf->rxmode_pending,
                                rxf->rxmode_pending_bitmask);

        /* Allmulti mode should not be active */
        BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)

        bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
        if (rx->rxf.rxf_id < 32)
                rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
        rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
                1 << (rx->rxf.rxf_id - 32));

bna_rxf_start(struct bna_rxf *rxf)

        rxf->start_cbfn = bna_rx_cb_rxf_started;
        rxf->start_cbarg = rxf->rx;
        rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
        bfa_fsm_send_event(rxf, RXF_E_START);

bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)

        bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
        if (rx->rxf.rxf_id < 32)
                rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
        rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
                1 << (rx->rxf.rxf_id - 32);

bna_rxf_stop(struct bna_rxf *rxf)

        rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
        rxf->stop_cbarg = rxf->rx;
        bfa_fsm_send_event(rxf, RXF_E_STOP);

bna_rxf_fail(struct bna_rxf *rxf)

        rxf->rxf_flags |= BNA_RXF_FL_FAILED;
        bfa_fsm_send_event(rxf, RXF_E_FAIL);

bna_rxf_state_get(struct bna_rxf *rxf)

        return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
                void (*cbfn)(struct bnad *, struct bna_rx *,
                        enum bna_cb_status))

        struct bna_rxf *rxf = &rx->rxf;

        if (rxf->ucast_active_mac == NULL) {
                rxf->ucast_active_mac =
                        bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
                if (rxf->ucast_active_mac == NULL)
                        return BNA_CB_UCAST_CAM_FULL;
                bfa_q_qe_init(&rxf->ucast_active_mac->qe);

        memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
        rxf->ucast_pending_set++;
        rxf->cam_fltr_cbfn = cbfn;
        rxf->cam_fltr_cbarg = rx->bna->bnad;

        bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

        return BNA_CB_SUCCESS;

bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
                void (*cbfn)(struct bnad *, struct bna_rx *,
                        enum bna_cb_status))

        struct bna_rxf *rxf = &rx->rxf;
        struct list_head *qe;
        struct bna_mac *mac;

        /* Check if already added */
        list_for_each(qe, &rxf->mcast_active_q) {
                mac = (struct bna_mac *)qe;
                if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
                        (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
                        return BNA_CB_SUCCESS;

        /* Check if pending addition */
        list_for_each(qe, &rxf->mcast_pending_add_q) {
                mac = (struct bna_mac *)qe;
                if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
                        (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
                        return BNA_CB_SUCCESS;

        mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
                return BNA_CB_MCAST_LIST_FULL;
        bfa_q_qe_init(&mac->qe);
        memcpy(mac->addr, addr, ETH_ALEN);
        list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

        rxf->cam_fltr_cbfn = cbfn;
        rxf->cam_fltr_cbarg = rx->bna->bnad;

        bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

        return BNA_CB_SUCCESS;
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
                void (*cbfn)(struct bnad *, struct bna_rx *,
                        enum bna_cb_status))

        struct bna_rxf *rxf = &rx->rxf;
        struct list_head list_head;
        struct list_head *qe;
        struct bna_mac *mac;
        struct bna_mac *mac1;
        int need_hw_config = 0;

        /* Allocate nodes */
        INIT_LIST_HEAD(&list_head);
        for (i = 0, mcaddr = mclist; i < count; i++) {
                mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
                bfa_q_qe_init(&mac->qe);
                memcpy(mac->addr, mcaddr, ETH_ALEN);
                list_add_tail(&mac->qe, &list_head);

        /* Schedule for addition */
        while (!list_empty(&list_head)) {
                bfa_q_deq(&list_head, &qe);
                mac = (struct bna_mac *)qe;
                bfa_q_qe_init(&mac->qe);

                /* Skip if already added */
                list_for_each(qe, &rxf->mcast_active_q) {
                        mac1 = (struct bna_mac *)qe;
                        if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
                                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,

                /* Skip if pending addition */
                list_for_each(qe, &rxf->mcast_pending_add_q) {
                        mac1 = (struct bna_mac *)qe;
                        if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
                                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,

                list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

        /*
         * Delete the entries that are in the pending_add_q but not
         */
        while (!list_empty(&rxf->mcast_pending_add_q)) {
                bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
                mac = (struct bna_mac *)qe;
                bfa_q_qe_init(&mac->qe);
                for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
                        if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {

                        bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
                        list_add_tail(&mac->qe, &list_head);

        while (!list_empty(&list_head)) {
                bfa_q_deq(&list_head, &qe);
                mac = (struct bna_mac *)qe;
                bfa_q_qe_init(&mac->qe);
                list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);

        /*
         * Schedule entries for deletion that are in the active_q but not
         */
        while (!list_empty(&rxf->mcast_active_q)) {
                bfa_q_deq(&rxf->mcast_active_q, &qe);
                mac = (struct bna_mac *)qe;
                bfa_q_qe_init(&mac->qe);
                for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
                        if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {

                        list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
                        list_add_tail(&mac->qe, &list_head);

        while (!list_empty(&list_head)) {
                bfa_q_deq(&list_head, &qe);
                mac = (struct bna_mac *)qe;
                bfa_q_qe_init(&mac->qe);
                list_add_tail(&mac->qe, &rxf->mcast_active_q);

        if (need_hw_config) {
                rxf->cam_fltr_cbfn = cbfn;
                rxf->cam_fltr_cbarg = rx->bna->bnad;
                bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
        (*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);

        return BNA_CB_SUCCESS;

        while (!list_empty(&list_head)) {
                bfa_q_deq(&list_head, &qe);
                mac = (struct bna_mac *)qe;
                bfa_q_qe_init(&mac->qe);
                bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);

        return BNA_CB_MCAST_LIST_FULL;
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)

        struct bna_rxf *rxf = &rx->rxf;
        int index = (vlan_id >> 5);
        int bit = (1 << (vlan_id & 0x1F));

        rxf->vlan_filter_table[index] |= bit;
        if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
                rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
                bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);

bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)

        struct bna_rxf *rxf = &rx->rxf;
        int index = (vlan_id >> 5);
        int bit = (1 << (vlan_id & 0x1F));

        rxf->vlan_filter_table[index] &= ~bit;
        if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
                rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
                bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
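/*
 * RXQ_RCB_INIT() initializes the receive control block for a queue and
 * derives its doorbell address the same way bna_ib_mod_init() does: the
 * offset of the queue's entry in a notional doorbell qset array at address 0
 * is added to the doorbell base of the PCI BAR.
 */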
#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
        struct bna_doorbell_qset *_qset; \
        unsigned long off; \
        (q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
        (q)->rcb->q_depth = (qdepth); \
        (q)->rcb->unmap_q = unmapq_mem; \
        (q)->rcb->rxq = (q); \
        (q)->rcb->cq = &(rxp)->cq; \
        (q)->rcb->bnad = (bna)->bnad; \
        _qset = (struct bna_doorbell_qset *)0; \
        off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
        (q)->rcb->q_dbell = off + \
                BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
        (q)->rcb->id = _id; \

#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
        (qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
        (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define call_rx_stop_callback(rx, status) \
        if ((rx)->stop_cbfn) { \
                (*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
                (rx)->stop_cbfn = NULL; \
                (rx)->stop_cbarg = NULL; \

/*
 * Since rx_enable is synchronous callback, there is no start_cbfn required.
 * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
 */

#define call_rx_disable_cbfn(rx, status) \
        if ((rx)->disable_cbfn) { \
                (*(rx)->disable_cbfn)((rx)->disable_cbarg, \
                (rx)->disable_cbfn = NULL; \
                (rx)->disable_cbarg = NULL; \

#define rxqs_reqd(type, num_rxqs) \
        (((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))

#define rx_ib_fail(rx) \
        struct bna_rxp *rxp; \
        struct list_head *qe; \
        list_for_each(qe, &(rx)->rxp_q) { \
                rxp = (struct bna_rxp *)qe; \
                bna_ib_fail(rxp->cq.ib); \

static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
static void __bna_rxq_start(struct bna_rxq *rxq);
static void __bna_cq_start(struct bna_cq *cq);
static void bna_rit_create(struct bna_rx *rx);
static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
static void bna_rx_cb_rxq_stopped_all(void *arg);

bfa_fsm_state_decl(bna_rx, stopped,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
        struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
        struct bna_rx, enum bna_rx_event);

static const struct bfa_sm_table rx_sm_table[] = {
        {BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
        {BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
        {BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
        {BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
        {BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)

        struct bna_rxp *rxp;
        struct list_head *qe_rxp;

        list_for_each(qe_rxp, &rx->rxp_q) {
                rxp = (struct bna_rxp *)qe_rxp;
                rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);

        call_rx_stop_callback(rx, BNA_CB_SUCCESS);

static void bna_rx_sm_stopped(struct bna_rx *rx,
                                enum bna_rx_event event)

                bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);

                call_rx_stop_callback(rx, BNA_CB_SUCCESS);

                bfa_sm_fault(rx->bna, event);

static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)

        struct bna_rxp *rxp;
        struct list_head *qe_rxp;
        struct bna_rxq *q0 = NULL, *q1 = NULL;

        list_for_each(qe_rxp, &rx->rxp_q) {
                rxp = (struct bna_rxp *)qe_rxp;
                bna_ib_start(rxp->cq.ib);
                GET_RXQS(rxp, q0, q1);
                q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
                __bna_rxq_start(q0);
                rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
                        __bna_rxq_start(q1);
                        rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
                __bna_cq_start(&rxp->cq);

        bna_rxf_start(&rx->rxf);

static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
                                enum bna_rx_event event)

                bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);

                bfa_fsm_set_state(rx, bna_rx_sm_stopped);
                bna_rxf_fail(&rx->rxf);

        case RX_E_RXF_STARTED:
                bfa_fsm_set_state(rx, bna_rx_sm_started);

                bfa_sm_fault(rx->bna, event);

bna_rx_sm_started_entry(struct bna_rx *rx)

        struct bna_rxp *rxp;
        struct list_head *qe_rxp;

        list_for_each(qe_rxp, &rx->rxp_q) {
                rxp = (struct bna_rxp *)qe_rxp;
                bna_ib_ack(&rxp->cq.ib->door_bell, 0);

        bna_llport_rx_started(&rx->bna->port.llport);

bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)

                bna_llport_rx_stopped(&rx->bna->port.llport);
                bfa_fsm_set_state(rx, bna_rx_sm_stopped);
                bna_rxf_fail(&rx->rxf);

                bna_llport_rx_stopped(&rx->bna->port.llport);
                bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);

                bfa_sm_fault(rx->bna, event);
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)

        bna_rxf_stop(&rx->rxf);

bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)

        case RX_E_RXF_STOPPED:
                bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);

        case RX_E_RXF_STARTED:
                /*
                 * RxF was in the process of starting up when
                 * RXF_E_STOP was issued. Ignore this event
                 */

                bfa_fsm_set_state(rx, bna_rx_sm_stopped);
                bna_rxf_fail(&rx->rxf);

                bfa_sm_fault(rx->bna, event);

bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)

        struct bna_rxp *rxp = NULL;
        struct bna_rxq *q0 = NULL;
        struct bna_rxq *q1 = NULL;
        struct list_head *qe;
        u32 rxq_mask[2] = {0, 0};

        /* Only one call to multi-rxq-stop for all RXPs in this RX */
        bfa_wc_up(&rx->rxq_stop_wc);
        list_for_each(qe, &rx->rxp_q) {
                rxp = (struct bna_rxp *)qe;
                GET_RXQS(rxp, q0, q1);
                if (q0->rxq_id < 32)
                        rxq_mask[0] |= ((u32)1 << q0->rxq_id);
                rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
                        if (q1->rxq_id < 32)
                                rxq_mask[0] |= ((u32)1 << q1->rxq_id);
                        rxq_mask[1] |= ((u32)
                                1 << (q1->rxq_id - 32));

        __bna_multi_rxq_stop(rxp, rxq_mask);

bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)

        struct bna_rxp *rxp = NULL;
        struct list_head *qe;

        case RX_E_RXQ_STOPPED:
                list_for_each(qe, &rx->rxp_q) {
                        rxp = (struct bna_rxp *)qe;
                        bna_ib_stop(rxp->cq.ib);

                bfa_fsm_set_state(rx, bna_rx_sm_stopped);

                bfa_sm_fault(rx->bna, event);

__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 * rxq_id_mask)

        struct bfi_ll_q_stop_req ll_req;

        bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
        ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
        ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
        bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
                bna_rx_cb_multi_rxq_stopped, rxp);
        bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
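/*
 * __bna_rxq_start() and __bna_cq_start() below follow the same register
 * programming pattern: build a local *_cfg image of the queue context,
 * select the HQM RAM page through regs.page_addr, then writel() each field
 * at base_addr plus the field's offset within the RAM block.
 */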
__bna_rxq_start(struct bna_rxq *rxq)

        struct bna_rxtx_q_mem *q_mem;
        struct bna_rxq_mem rxq_cfg, *rxq_mem;
        struct bna_dma_addr cur_q_addr;
        /* struct bna_doorbell_qset *qset; */
        struct bna_qpt *qpt;
        struct bna *bna = rxq->rx->bna;
        void __iomem *base_addr;

        cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));

        rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
        rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
        rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
        rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;

        rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
        rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
                (qpt->page_size >> 2);
        rxq_cfg.sg_n_cq_n_cns_ptr =
                ((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
        rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
        rxq_cfg.next_qid = 0x0 | (0x3 << 8);

        /* Write the page number register */
        pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
                        HQM_RXTX_Q_RAM_BASE_OFFSET);
        writel(pg_num, bna->regs.page_addr);

        base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
                        HQM_RXTX_Q_RAM_BASE_OFFSET);

        q_mem = (struct bna_rxtx_q_mem *)0;
        rxq_mem = &q_mem[rxq->rxq_id].rxq;

        off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
        writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);

        off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
        writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);

        off = (unsigned long)&rxq_mem->cur_q_entry_lo;
        writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);

        off = (unsigned long)&rxq_mem->cur_q_entry_hi;
        writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);

        off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
        writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);

        off = (unsigned long)&rxq_mem->entry_n_pg_size;
        writel(rxq_cfg.entry_n_pg_size, base_addr + off);

        off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
        writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);

        off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
        writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);

        off = (unsigned long)&rxq_mem->next_qid;
        writel(rxq_cfg.next_qid, base_addr + off);

        rxq->rcb->producer_index = 0;
        rxq->rcb->consumer_index = 0;
__bna_cq_start(struct bna_cq *cq)

        struct bna_cq_mem cq_cfg, *cq_mem;
        const struct bna_qpt *qpt;
        struct bna_dma_addr cur_q_addr;
        struct bna *bna = cq->rx->bna;
        void __iomem *base_addr;

        cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));

        /*
         * Fill out structure, to be subsequently written
         */
        cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
        cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
        cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
        cq_cfg.cur_q_entry_hi = cur_q_addr.msb;

        cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
        cq_cfg.entry_n_pg_size =
                ((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
        cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
                ((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
        cq_cfg.q_state = BNA_Q_IDLE_STATE;

        /* Write the page number register */
        pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
                        HQM_CQ_RAM_BASE_OFFSET);

        writel(pg_num, bna->regs.page_addr);

        base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
                        HQM_CQ_RAM_BASE_OFFSET);

        cq_mem = (struct bna_cq_mem *)0;

        off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
        writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);

        off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
        writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);

        off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
        writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);

        off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
        writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);

        off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
        writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);

        off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
        writel(cq_cfg.entry_n_pg_size, base_addr + off);

        off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
        writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);

        off = (unsigned long)&cq_mem[cq->cq_id].q_state;
        writel(cq_cfg.q_state, base_addr + off);

        cq->ccb->producer_index = 0;
        *(cq->ccb->hw_producer_index) = 0;
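/*
 * bna_rit_create() fills this RX's RIT segment with one entry per path,
 * recording the large RxQ id and, when a small queue exists, its id as well;
 * __rxf_rit_set() later writes these entries into hardware.
 */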
bna_rit_create(struct bna_rx *rx)

        struct list_head *qe_rxp;
        struct bna_rxp *rxp;
        struct bna_rxq *q0 = NULL;
        struct bna_rxq *q1 = NULL;

        list_for_each(qe_rxp, &rx->rxp_q) {
                rxp = (struct bna_rxp *)qe_rxp;
                GET_RXQS(rxp, q0, q1);
                rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
                rx->rxf.rit_segment->rit[offset].small_rxq_id =
                        (q1 ? q1->rxq_id : 0);

_rx_can_satisfy(struct bna_rx_mod *rx_mod,
                struct bna_rx_config *rx_cfg)

        if ((rx_mod->rx_free_count == 0) ||
                (rx_mod->rxp_free_count == 0) ||
                (rx_mod->rxq_free_count == 0))

        if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
                if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
                        (rx_mod->rxq_free_count < rx_cfg->num_paths))

                if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
                        (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))

        if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
static struct bna_rxq *
_get_free_rxq(struct bna_rx_mod *rx_mod)

        struct bna_rxq *rxq = NULL;
        struct list_head *qe = NULL;

        bfa_q_deq(&rx_mod->rxq_free_q, &qe);
                rx_mod->rxq_free_count--;
                rxq = (struct bna_rxq *)qe;

_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)

        bfa_q_qe_init(&rxq->qe);
        list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
        rx_mod->rxq_free_count++;

static struct bna_rxp *
_get_free_rxp(struct bna_rx_mod *rx_mod)

        struct list_head *qe = NULL;
        struct bna_rxp *rxp = NULL;

        bfa_q_deq(&rx_mod->rxp_free_q, &qe);
                rx_mod->rxp_free_count--;
                rxp = (struct bna_rxp *)qe;

_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)

        bfa_q_qe_init(&rxp->qe);
        list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
        rx_mod->rxp_free_count++;

static struct bna_rx *
_get_free_rx(struct bna_rx_mod *rx_mod)

        struct list_head *qe = NULL;
        struct bna_rx *rx = NULL;

        bfa_q_deq(&rx_mod->rx_free_q, &qe);
                rx_mod->rx_free_count--;
                rx = (struct bna_rx *)qe;
                list_add_tail(&rx->qe, &rx_mod->rx_active_q);

_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)

        bfa_q_qe_init(&rx->qe);
        list_add_tail(&rx->qe, &rx_mod->rx_free_q);
        rx_mod->rx_free_count++;

_rx_init(struct bna_rx *rx, struct bna *bna)

        INIT_LIST_HEAD(&rx->rxp_q);

        rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
        rx->rxq_stop_wc.wc_cbarg = rx;
        rx->rxq_stop_wc.wc_count = 0;

        rx->stop_cbfn = NULL;
        rx->stop_cbarg = NULL;

_rxp_add_rxqs(struct bna_rxp *rxp,

        switch (rxp->type) {
        case BNA_RXP_SINGLE:
                rxp->rxq.single.only = q0;
                rxp->rxq.single.reserved = NULL;

                rxp->rxq.slr.large = q0;
                rxp->rxq.slr.small = q1;

                rxp->rxq.hds.data = q0;
                rxp->rxq.hds.hdr = q1;
2388 _rxq_qpt_init(struct bna_rxq
*rxq
,
2389 struct bna_rxp
*rxp
,
2392 struct bna_mem_descr
*qpt_mem
,
2393 struct bna_mem_descr
*swqpt_mem
,
2394 struct bna_mem_descr
*page_mem
)
2398 rxq
->qpt
.hw_qpt_ptr
.lsb
= qpt_mem
->dma
.lsb
;
2399 rxq
->qpt
.hw_qpt_ptr
.msb
= qpt_mem
->dma
.msb
;
2400 rxq
->qpt
.kv_qpt_ptr
= qpt_mem
->kva
;
2401 rxq
->qpt
.page_count
= page_count
;
2402 rxq
->qpt
.page_size
= page_size
;
2404 rxq
->rcb
->sw_qpt
= (void **) swqpt_mem
->kva
;
2406 for (i
= 0; i
< rxq
->qpt
.page_count
; i
++) {
2407 rxq
->rcb
->sw_qpt
[i
] = page_mem
[i
].kva
;
2408 ((struct bna_dma_addr
*)rxq
->qpt
.kv_qpt_ptr
)[i
].lsb
=
2409 page_mem
[i
].dma
.lsb
;
2410 ((struct bna_dma_addr
*)rxq
->qpt
.kv_qpt_ptr
)[i
].msb
=
2411 page_mem
[i
].dma
.msb
;
static void
_rxp_cqpt_setup(struct bna_rxp *rxp,
		u32 page_count,
		u32 page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
	rxp->cq.qpt.page_count = page_count;
	rxp->cq.qpt.page_size = page_size;

	rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < rxp->cq.qpt.page_count; i++) {
		rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}
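
/*
 * Note on the QPT setup above: the hardware is handed the DMA address of
 * a queue page table (qpt.hw_qpt_ptr) whose entries are the DMA addresses
 * of the individual queue pages, while the driver keeps a parallel
 * software table (sw_qpt) holding the kernel virtual address of each of
 * the same pages. For a page table with N pages:
 *
 *	((struct bna_dma_addr *)qpt.kv_qpt_ptr)[i]  - device view of page i
 *	sw_qpt[i] == page_mem[i].kva                - driver view of page i
 */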
static void
_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
{
	list_add_tail(&rxp->qe, &rx->rxp_q);
}

static void
_init_rxmod_queues(struct bna_rx_mod *rx_mod)
{
	INIT_LIST_HEAD(&rx_mod->rx_free_q);
	INIT_LIST_HEAD(&rx_mod->rxq_free_q);
	INIT_LIST_HEAD(&rx_mod->rxp_free_q);
	INIT_LIST_HEAD(&rx_mod->rx_active_q);

	rx_mod->rx_free_count = 0;
	rx_mod->rxq_free_count = 0;
	rx_mod->rxp_free_count = 0;
}
static void
_rx_ctor(struct bna_rx *rx, int id)
{
	bfa_q_qe_init(&rx->qe);
	INIT_LIST_HEAD(&rx->rxp_q);

	rx->rxf.rxf_id = id;

	/* FIXME: mbox_qe ctor()?? */
	bfa_q_qe_init(&rx->mbox_qe.qe);

	rx->stop_cbfn = NULL;
	rx->stop_cbarg = NULL;
}
void
bna_rx_cb_multi_rxq_stopped(void *arg, int status)
{
	struct bna_rxp *rxp = (struct bna_rxp *)arg;

	bfa_wc_down(&rxp->rx->rxq_stop_wc);
}

void
bna_rx_cb_rxq_stopped_all(void *arg)
{
	struct bna_rx *rx = (struct bna_rx *)arg;

	bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
}

void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
		enum bna_cb_status status)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	bfa_wc_down(&rx_mod->rx_stop_wc);
}

void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
	struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;

	if (rx_mod->stop_cbfn)
		rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
	rx_mod->stop_cbfn = NULL;
}
void
bna_rx_start(struct bna_rx *rx)
{
	rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
	if (rx->rx_flags & BNA_RX_F_ENABLE)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_stop(struct bna_rx *rx)
{
	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
	if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
		bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
	else {
		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
		rx->stop_cbarg = &rx->bna->rx_mod;
		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}

void
bna_rx_fail(struct bna_rx *rx)
{
	/* Indicate port is not enabled, and failed */
	rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
	rx->rx_flags |= BNA_RX_F_PORT_FAILED;
	bfa_fsm_send_event(rx, RX_E_FAIL);
}
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
	if (type == BNA_RX_T_LOOPBACK)
		rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_start(rx);
	}
}
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;

	rx_mod->stop_cbfn = bna_port_cb_rx_stopped;

	/*
	 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
	 * as we are going to call bna_rx_stop
	 */
	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bfa_wc_up(&rx_mod->rx_stop_wc);
	}

	if (rx_mod->rx_stop_wc.wc_count == 0) {
		rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
		rx_mod->stop_cbfn = NULL;
		return;
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		if (rx->type == type)
			bna_rx_stop(rx);
	}
}
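
/*
 * Note: bna_rx_mod_stop() is built around the bfa_wc wait counter wired
 * up in bna_rx_mod_init(): one bfa_wc_up() is done for every RX that will
 * be stopped, each bna_rx_mod_cb_rx_stopped() callback does a
 * bfa_wc_down(), and when the count hits zero the wc_resume handler
 * (bna_rx_mod_cb_rx_stopped_all) reports completion to the port. The
 * explicit wc_count == 0 check handles the case where no RX of the
 * requested type was active, so the stop callback still fires.
 */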
void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
	struct bna_rx *rx;
	struct list_head *qe;

	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
	rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &rx_mod->rx_active_q) {
		rx = (struct bna_rx *)qe;
		bna_rx_fail(rx);
	}
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int index;
	struct bna_rx *rx_ptr;
	struct bna_rxp *rxp_ptr;
	struct bna_rxq *rxq_ptr;

	rx_mod->bna = bna;

	rx_mod->rx = (struct bna_rx *)
		res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxp = (struct bna_rxp *)
		res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
	rx_mod->rxq = (struct bna_rxq *)
		res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	/* Initialize the queues */
	_init_rxmod_queues(rx_mod);

	/* Build RX queues */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rx_ptr = &rx_mod->rx[index];
		_rx_ctor(rx_ptr, index);
		list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
		rx_mod->rx_free_count++;
	}

	/* build RX-path queue */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rxp_ptr = &rx_mod->rxp[index];
		rxp_ptr->cq.cq_id = index;
		bfa_q_qe_init(&rxp_ptr->qe);
		list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
		rx_mod->rxp_free_count++;
	}

	/* build RXQ queue */
	for (index = 0; index < BFI_MAX_RXQ; index++) {
		rxq_ptr = &rx_mod->rxq[index];
		rxq_ptr->rxq_id = index;
		bfa_q_qe_init(&rxq_ptr->qe);
		list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
		rx_mod->rxq_free_count++;
	}

	rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
	rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
	rx_mod->rx_stop_wc.wc_count = 0;
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
	struct list_head *qe;

	/* Walk the free lists (nothing further to release here) */
	list_for_each(qe, &rx_mod->rx_free_q)
		;
	list_for_each(qe, &rx_mod->rxp_free_q)
		;
	list_for_each(qe, &rx_mod->rxq_free_q)
		;
}

int
bna_rx_state_get(struct bna_rx *rx)
{
	return bfa_sm_to_state(rx_sm_table, rx->fsm);
}
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
	u32 cq_size, hq_size, dq_size;
	u32 cpage_count, hpage_count, dpage_count;
	u32 dq_depth, hq_depth, cq_depth;
	struct bna_mem_info *mem_info;

	dq_depth = q_cfg->q_depth;
	hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
	cq_depth = dq_depth + hq_depth;

	BNA_TO_POWER_OF_2_HIGH(cq_depth);
	cq_size = cq_depth * BFI_CQ_WI_SIZE;
	cq_size = ALIGN(cq_size, PAGE_SIZE);
	cpage_count = SIZE_TO_PAGES(cq_size);

	BNA_TO_POWER_OF_2_HIGH(dq_depth);
	dq_size = dq_depth * BFI_RXQ_WI_SIZE;
	dq_size = ALIGN(dq_size, PAGE_SIZE);
	dpage_count = SIZE_TO_PAGES(dq_size);

	if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
		BNA_TO_POWER_OF_2_HIGH(hq_depth);
		hq_size = hq_depth * BFI_RXQ_WI_SIZE;
		hq_size = ALIGN(hq_size, PAGE_SIZE);
		hpage_count = SIZE_TO_PAGES(hq_size);
	} else
		hpage_count = 0;

	/* CCB structures */
	res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_ccb);
	mem_info->num = q_cfg->num_paths;

	/* RCB structures */
	res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_rcb);
	mem_info->num = BNA_GET_RXQS(q_cfg);

	/* Completion QPT */
	res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	/* Completion s/w QPT */
	res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = cpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	/* Completion QPT pages */
	res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = cpage_count * q_cfg->num_paths;

	/* Data QPT */
	res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = q_cfg->num_paths;

	/* Data s/w QPT */
	res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = dpage_count * sizeof(void *);
	mem_info->num = q_cfg->num_paths;

	/* Data QPT pages */
	res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = dpage_count * q_cfg->num_paths;

	/* Hdr QPT */
	res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	/* Hdr s/w QPT */
	res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = hpage_count * sizeof(void *);
	mem_info->num = (hpage_count ? q_cfg->num_paths : 0);

	/* Hdr QPT pages */
	res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = (hpage_count ? PAGE_SIZE : 0);
	mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);

	/* RX interrupts */
	res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
	res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
		struct bna_rx_config *rx_cfg,
		struct bna_rx_event_cbfn *rx_cbfn,
		struct bna_res_info *res_info,
		void *priv)
{
	struct bna_rx_mod *rx_mod = &bna->rx_mod;
	struct bna_rx *rx;
	struct bna_rxp *rxp;
	struct bna_rxq *q0;
	struct bna_rxq *q1;
	struct bna_intr_info *intr_info;
	u32 page_count;
	struct bna_mem_descr *ccb_mem;
	struct bna_mem_descr *rcb_mem;
	struct bna_mem_descr *unmapq_mem;
	struct bna_mem_descr *cqpt_mem;
	struct bna_mem_descr *cswqpt_mem;
	struct bna_mem_descr *cpage_mem;
	struct bna_mem_descr *hqpt_mem;	/* Header/Small Q qpt */
	struct bna_mem_descr *dqpt_mem;	/* Data/Large Q qpt */
	struct bna_mem_descr *hsqpt_mem;	/* s/w qpt for hdr */
	struct bna_mem_descr *dsqpt_mem;	/* s/w qpt for data */
	struct bna_mem_descr *hpage_mem;	/* hdr page mem */
	struct bna_mem_descr *dpage_mem;	/* data page mem */
	int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
	int dpage_count, hpage_count, rcb_idx;
	struct bna_ib_config ibcfg;

	/* Fail if we don't have enough RXPs, RXQs */
	if (!_rx_can_satisfy(rx_mod, rx_cfg))
		return NULL;

	/* Initialize resource pointers */
	intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
	ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
	rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
	unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
	cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
	cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
	cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
	hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
	dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
	hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
	dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
	hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
	dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];

	/* Compute q depth & page count */
	page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
			rx_cfg->num_paths;
	dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;
	hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
			rx_cfg->num_paths;

	/* Get RX pointer */
	rx = _get_free_rx(rx_mod);
	rx->bna = bna;
	rx->type = rx_cfg->rx_type;

	rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
	rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
	rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
	rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
	/* Following callbacks are mandatory */
	rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
	rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;

	if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
		switch (rx->type) {
		case BNA_RX_T_REGULAR:
			if (!(rx->bna->rx_mod.flags &
				BNA_RX_MOD_F_PORT_LOOPBACK))
				rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
			break;
		case BNA_RX_T_LOOPBACK:
			if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
				rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
			break;
		}
	}

	for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
		rxp = _get_free_rxp(rx_mod);
		rxp->type = rx_cfg->rxp_type;
		rxp->rx = rx;

		/* Get required RXQs, and queue them to rx-path */
		q0 = _get_free_rxq(rx_mod);
		if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
			q1 = NULL;
		else
			q1 = _get_free_rxq(rx_mod);

		/* Set up the IB for this path */
		if (1 == intr_info->num) {
			rxp->cq.ib = bna_ib_get(&bna->ib_mod,
					intr_info->intr_type,
					intr_info->idl[0].vector);
			rxp->vector = intr_info->idl[0].vector;
		} else {
			rxp->cq.ib = bna_ib_get(&bna->ib_mod,
					intr_info->intr_type,
					intr_info->idl[i].vector);

			/* Map the MSI-x vector used for this RXP */
			rxp->vector = intr_info->idl[i].vector;
		}

		rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);

		ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
		ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
		ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
		ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;

		ret = bna_ib_config(rxp->cq.ib, &ibcfg);

		/* Link rxqs to rxp */
		_rxp_add_rxqs(rxp, q0, q1);

		/* Link rxp to rx */
		_rx_add_rxp(rx, rxp);

		/* Initialize RCB for the large / data q */
		q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
		RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
			(void *)unmapq_mem[rcb_idx].kva);
		rcb_idx++;
		(q0)->rx_packets = (q0)->rx_bytes = 0;
		(q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;

		/* Initialize RXQs */
		_rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
			&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
		q0->rcb->page_idx = dpage_idx;
		q0->rcb->page_count = dpage_count;
		dpage_idx += dpage_count;

		/* Call bnad to complete rcb setup */
		if (rx->rcb_setup_cbfn)
			rx->rcb_setup_cbfn(bnad, q0->rcb);

		if (q1) {
			/* Initialize RCB for the small / header q */
			q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
			RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
				(void *)unmapq_mem[rcb_idx].kva);
			rcb_idx++;
			(q1)->buffer_size = (rx_cfg)->small_buff_size;
			(q1)->rx_packets = (q1)->rx_bytes = 0;
			(q1)->rx_packets_with_error =
				(q1)->rxbuf_alloc_failed = 0;

			_rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
				&hqpt_mem[i], &hsqpt_mem[i],
				&hpage_mem[hpage_idx]);
			q1->rcb->page_idx = hpage_idx;
			q1->rcb->page_count = hpage_count;
			hpage_idx += hpage_count;

			/* Call bnad to complete rcb setup */
			if (rx->rcb_setup_cbfn)
				rx->rcb_setup_cbfn(bnad, q1->rcb);
		}

		/* Setup the CQ / CCB for this path */
		rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
		_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
			&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
		rxp->cq.ccb->page_idx = cpage_idx;
		rxp->cq.ccb->page_count = page_count;
		cpage_idx += page_count;

		rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
		rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;

		rxp->cq.ccb->producer_index = 0;
		rxp->cq.ccb->q_depth = rx_cfg->q_depth +
			((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
			0 : rx_cfg->q_depth);
		rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
		rxp->cq.ccb->rcb[0] = q0->rcb;
		if (q1)
			rxp->cq.ccb->rcb[1] = q1->rcb;
		rxp->cq.ccb->cq = &rxp->cq;
		rxp->cq.ccb->bnad = bna->bnad;
		rxp->cq.ccb->hw_producer_index =
			((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
			(rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
		*(rxp->cq.ccb->hw_producer_index) = 0;
		rxp->cq.ccb->intr_type = intr_info->intr_type;
		rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
			intr_info->idl[0].vector :
			intr_info->idl[i].vector;
		rxp->cq.ccb->rx_coalescing_timeo =
			rxp->cq.ib->ib_config.coalescing_timeo;
		rxp->cq.ccb->id = i;

		/* Call bnad to complete CCB setup */
		if (rx->ccb_setup_cbfn)
			rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);

	} /* for each rx-path */

	bna_rxf_init(&rx->rxf, rx, rx_cfg);

	bfa_fsm_set_state(rx, bna_rx_sm_stopped);

	return rx;
}
void
bna_rx_destroy(struct bna_rx *rx)
{
	struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
	struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;
	struct bna_rxp *rxp;
	struct list_head *qe;

	bna_rxf_uninit(&rx->rxf);

	while (!list_empty(&rx->rxp_q)) {
		bfa_q_deq(&rx->rxp_q, &rxp);
		GET_RXQS(rxp, q0, q1);
		/* Callback to bnad for destroying RCB */
		if (rx->rcb_destroy_cbfn)
			rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
		_put_free_rxq(rx_mod, q0);

		if (q1) {
			/* Callback to bnad for destroying RCB */
			if (rx->rcb_destroy_cbfn)
				rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
			_put_free_rxq(rx_mod, q1);
		}

		rxp->rxq.slr.large = NULL;
		rxp->rxq.slr.small = NULL;

		if (rxp->cq.ib_seg_offset != 0xff)
			bna_ib_release_idx(rxp->cq.ib,
					rxp->cq.ib_seg_offset);
		bna_ib_put(ib_mod, rxp->cq.ib);

		/* Callback to bnad for destroying CCB */
		if (rx->ccb_destroy_cbfn)
			rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);

		_put_free_rxp(rx_mod, rxp);
	}

	list_for_each(qe, &rx_mod->rx_active_q) {
		if (qe == &rx->qe) {
			list_del(&rx->qe);
			bfa_q_qe_init(&rx->qe);
			break;
		}
	}

	_put_free_rx(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
	if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
		return;

	rx->rx_flags |= BNA_RX_F_ENABLE;
	if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
		bfa_fsm_send_event(rx, RX_E_START);
}

void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_rx *,
				enum bna_cb_status))
{
	if (type == BNA_SOFT_CLEANUP) {
		/* h/w should not be accessed. Treat we're stopped */
		(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
	} else {
		rx->stop_cbfn = cbfn;
		rx->stop_cbarg = rx->bna->bnad;

		rx->rx_flags &= ~BNA_RX_F_ENABLE;

		bfa_fsm_send_event(rx, RX_E_STOP);
	}
}
#define call_tx_stop_cbfn(tx, status)\
do {\
	if ((tx)->stop_cbfn)\
		(tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
	(tx)->stop_cbfn = NULL;\
	(tx)->stop_cbarg = NULL;\
} while (0)

#define call_tx_prio_change_cbfn(tx, status)\
do {\
	if ((tx)->prio_change_cbfn)\
		(tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
	(tx)->prio_change_cbfn = NULL;\
} while (0)

static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
			enum bna_cb_status status);
static void bna_tx_cb_txq_stopped(void *arg, int status);
static void bna_tx_cb_stats_cleared(void *arg, int status);
static void __bna_tx_stop(struct bna_tx *tx);
static void __bna_tx_start(struct bna_tx *tx);
static void __bna_txf_stat_clr(struct bna_tx *tx);

enum bna_tx_event {
	TX_E_START		= 1,
	TX_E_STOP		= 2,
	TX_E_FAIL		= 3,
	TX_E_TXQ_STOPPED	= 4,
	TX_E_PRIO_CHANGE	= 5,
	TX_E_STAT_CLEARED	= 6,
};

enum bna_tx_state {
	BNA_TX_STOPPED		= 1,
	BNA_TX_STARTED		= 2,
	BNA_TX_TXQ_STOP_WAIT	= 3,
	BNA_TX_PRIO_STOP_WAIT	= 4,
	BNA_TX_STAT_CLR_WAIT	= 5,
};

bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
			enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
			enum bna_tx_event);

static struct bfa_sm_table tx_sm_table[] = {
	{BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
	{BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
	{BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
	{BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
	{BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
};
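
/*
 * Note: the TX object is driven by the bfa_fsm framework. Each
 * bfa_fsm_state_decl() above declares a <state>_entry() handler and a
 * <state>(tx, event) handler; bfa_fsm_set_state() switches state and runs
 * the entry handler, and bfa_fsm_send_event() dispatches an event to the
 * current state's handler. tx_sm_table maps the handler pointers back to
 * the BNA_TX_* state values so bna_tx_state_get() can report a plain
 * state number.
 */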
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
	}

	call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
}

static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_START:
		bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_PRIO_CHANGE:
		call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
		break;

	case TX_E_TXQ_STOPPED:
		/*
		 * This event is received due to flushing of mbox when
		 * ...
		 */
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	/* Ack the IB doorbells so the queues start interrupting */
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_ack(&txq->ib->door_bell, 0);
	}
}

static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head *qe;

	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
		break;

	case TX_E_FAIL:
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_fail(txq->ib);
			(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
		}
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_PRIO_CHANGE:
		bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}
static void
bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head *qe;

	switch (event) {
	case TX_E_FAIL:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_TXQ_STOPPED:
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_stop(txq->ib);
		}
		bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
		break;

	case TX_E_PRIO_CHANGE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
}

static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	struct bna_txq *txq;
	struct list_head *qe;

	switch (event) {
	case TX_E_STOP:
		bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
		break;

	case TX_E_FAIL:
		call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	case TX_E_TXQ_STOPPED:
		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			bna_ib_stop(txq->ib);
			(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
		}
		call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
		bfa_fsm_set_state(tx, bna_tx_sm_started);
		break;

	case TX_E_PRIO_CHANGE:
		/* No-op */
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}
static void
bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
{
	__bna_txf_stat_clr(tx);
}

static void
bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
{
	switch (event) {
	case TX_E_STAT_CLEARED:
		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
		break;

	default:
		bfa_sm_fault(tx->bna, event);
	}
}
static void
__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
{
	struct bna_rxtx_q_mem *q_mem;
	struct bna_txq_mem txq_cfg;
	struct bna_txq_mem *txq_mem;
	struct bna_dma_addr cur_q_addr;
	u32 pg_num;
	void __iomem *base_addr;
	unsigned long off;

	/* Fill out structure, to be subsequently written to hardware */
	txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
	txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
	cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
	txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
	txq_cfg.cur_q_entry_hi = cur_q_addr.msb;

	txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;

	txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
			(txq->qpt.page_size >> 2);
	txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
			((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);

	txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
	txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
			(txq->priority & 0x7));
	txq_cfg.wvc_n_cquota_n_rquota =
			((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
			(BFI_TX_MAX_WRR_QUOTA & 0xfff));

	/* Setup the page and write to H/W */

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
			HQM_RXTX_Q_RAM_BASE_OFFSET);
	writel(pg_num, tx->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
			HQM_RXTX_Q_RAM_BASE_OFFSET);
	q_mem = (struct bna_rxtx_q_mem *)0;
	txq_mem = &q_mem[txq->txq_id].txq;

	/*
	 * The following 4 lines, is a hack b'cos the H/W needs to read
	 * these DMA addresses as little endian
	 */
	off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
	writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);

	off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
	writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);

	off = (unsigned long)&txq_mem->cur_q_entry_lo;
	writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);

	off = (unsigned long)&txq_mem->cur_q_entry_hi;
	writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);

	off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
	writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);

	off = (unsigned long)&txq_mem->entry_n_pg_size;
	writel(txq_cfg.entry_n_pg_size, base_addr + off);

	off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
	writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);

	off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
	writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);

	off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
	writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);

	off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
	writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);

	txq->tcb->producer_index = 0;
	txq->tcb->consumer_index = 0;
	*(txq->tcb->hw_consumer_index) = 0;
}
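
/*
 * Register addressing idiom used in __bna_txq_start(): q_mem is a
 * NULL-based pointer to struct bna_rxtx_q_mem, so taking the address of a
 * member of q_mem[txq->txq_id] yields that member's byte offset within
 * the queue RAM block (an open-coded offsetof()); the offset is then
 * added to base_addr for the writel(). The htonl() byte swap on the DMA
 * address words implements the endianness hack called out in the comment
 * inside the function.
 */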
static void
__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
{
	struct bfi_ll_q_stop_req ll_req;
	u32 bit_mask[2] = {0, 0};

	if (txq->txq_id < 32)
		bit_mask[0] = (u32)1 << txq->txq_id;
	else
		bit_mask[1] = (u32)1 << (txq->txq_id - 32);

	memset(&ll_req, 0, sizeof(ll_req));
	ll_req.mh.msg_class = BFI_MC_LL;
	ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
	ll_req.mh.mtag.h2i.lpu_id = 0;
	ll_req.q_id_mask[0] = htonl(bit_mask[0]);
	ll_req.q_id_mask[1] = htonl(bit_mask[1]);

	bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
			bna_tx_cb_txq_stopped, tx);

	bna_mbox_send(tx->bna, &tx->mbox_qe);
}
static void
__bna_txf_start(struct bna_tx *tx)
{
	struct bna_tx_fndb_ram *tx_fndb;
	struct bna_txf *txf = &tx->txf;
	void __iomem *base_addr;
	unsigned long off;

	writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
			(tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
			tx->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
					TX_FNDB_RAM_BASE_OFFSET);

	tx_fndb = (struct bna_tx_fndb_ram *)0;
	off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;

	writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
			base_addr + off);

	if (tx->txf.txf_id < 32)
		tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
	else
		tx->bna->tx_mod.txf_bmap[1] |= ((u32)
					1 << (tx->txf.txf_id - 32));
}
static void
__bna_txf_stop(struct bna_tx *tx)
{
	struct bna_tx_fndb_ram *tx_fndb;
	u32 page_num;
	u32 ctl_flags;
	struct bna_txf *txf = &tx->txf;
	void __iomem *base_addr;
	unsigned long off;

	/* retrieve the running txf_flags & turn off enable bit */
	page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
			(tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
	writel(page_num, tx->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
					TX_FNDB_RAM_BASE_OFFSET);
	tx_fndb = (struct bna_tx_fndb_ram *)0;
	off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;

	ctl_flags = readl(base_addr + off);
	ctl_flags &= ~BFI_TXF_CF_ENABLE;

	writel(ctl_flags, base_addr + off);

	if (tx->txf.txf_id < 32)
		tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
	else
		/* bits 32 and above live in txf_bmap[1], as in __bna_txf_start() */
		tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
					1 << (tx->txf.txf_id - 32));
}
static void
__bna_txf_stat_clr(struct bna_tx *tx)
{
	struct bfi_ll_stats_req ll_req;
	u32 txf_bmap[2] = {0, 0};

	if (tx->txf.txf_id < 32)
		txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
	else
		txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
	ll_req.stats_mask = 0;
	ll_req.rxf_id_mask[0] = 0;
	ll_req.rxf_id_mask[1] = 0;
	ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
	ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);

	bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
			bna_tx_cb_stats_cleared, tx);
	bna_mbox_send(tx->bna, &tx->mbox_qe);
}
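
/*
 * Note: __bna_txq_stop() and __bna_txf_stat_clr() use the same mailbox
 * pattern - build a bfi_ll_* request, attach a completion callback with
 * bna_mbox_qe_fill() (bna_tx_cb_txq_stopped or bna_tx_cb_stats_cleared),
 * then post it with bna_mbox_send(). The callback runs when the firmware
 * answers and either drops the txq_stop_wc wait counter or feeds
 * TX_E_STAT_CLEARED back into the TX state machine.
 */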
static void
__bna_tx_start(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bna_ib_start(txq->ib);
		__bna_txq_start(tx, txq);
	}

	__bna_txf_start(tx);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->tcb->priority = txq->priority;
		(tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
	}
}
static void
__bna_tx_stop(struct bna_tx *tx)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
	}

	__bna_txf_stop(tx);

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		bfa_wc_up(&tx->txq_stop_wc);
	}

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		__bna_txq_stop(tx, txq);
	}
}
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
		struct bna_mem_descr *qpt_mem,
		struct bna_mem_descr *swqpt_mem,
		struct bna_mem_descr *page_mem)
{
	int i;

	txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
	txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
	txq->qpt.kv_qpt_ptr = qpt_mem->kva;
	txq->qpt.page_count = page_count;
	txq->qpt.page_size = page_size;

	txq->tcb->sw_qpt = (void **) swqpt_mem->kva;

	for (i = 0; i < page_count; i++) {
		txq->tcb->sw_qpt[i] = page_mem[i].kva;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
			page_mem[i].dma.lsb;
		((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
			page_mem[i].dma.msb;
	}
}
static void
bna_tx_free(struct bna_tx *tx)
{
	struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
	struct bna_txq *txq;
	struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
	struct list_head *qe;

	while (!list_empty(&tx->txq_q)) {
		bfa_q_deq(&tx->txq_q, &txq);
		bfa_q_qe_init(&txq->qe);
		if (txq->ib) {
			if (txq->ib_seg_offset != -1)
				bna_ib_release_idx(txq->ib,
						txq->ib_seg_offset);
			bna_ib_put(ib_mod, txq->ib);
			txq->ib = NULL;
		}
		list_add_tail(&txq->qe, &tx_mod->txq_free_q);
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		if (qe == &tx->qe) {
			list_del(&tx->qe);
			bfa_q_qe_init(&tx->qe);
			break;
		}
	}

	list_add_tail(&tx->qe, &tx_mod->tx_free_q);
}
void
bna_tx_cb_txq_stopped(void *arg, int status)
{
	struct bna_tx *tx = (struct bna_tx *)arg;

	bfa_q_qe_init(&tx->mbox_qe.qe);
	bfa_wc_down(&tx->txq_stop_wc);
}

void
bna_tx_cb_txq_stopped_all(void *arg)
{
	struct bna_tx *tx = (struct bna_tx *)arg;

	bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
}

void
bna_tx_cb_stats_cleared(void *arg, int status)
{
	struct bna_tx *tx = (struct bna_tx *)arg;

	bfa_q_qe_init(&tx->mbox_qe.qe);

	bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
}
void
bna_tx_start(struct bna_tx *tx)
{
	tx->flags |= BNA_TX_F_PORT_STARTED;
	if (tx->flags & BNA_TX_F_ENABLED)
		bfa_fsm_send_event(tx, TX_E_START);
}

void
bna_tx_stop(struct bna_tx *tx)
{
	tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
	tx->stop_cbarg = &tx->bna->tx_mod;

	tx->flags &= ~BNA_TX_F_PORT_STARTED;
	bfa_fsm_send_event(tx, TX_E_STOP);
}

void
bna_tx_fail(struct bna_tx *tx)
{
	tx->flags &= ~BNA_TX_F_PORT_STARTED;
	bfa_fsm_send_event(tx, TX_E_FAIL);
}

void
bna_tx_prio_changed(struct bna_tx *tx, int prio)
{
	struct bna_txq *txq;
	struct list_head *qe;

	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->priority = prio;
	}

	bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
}

void
bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
{
	if (cee_link)
		tx->flags |= BNA_TX_F_PRIO_LOCK;
	else
		tx->flags &= ~BNA_TX_F_PRIO_LOCK;
}
void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
		enum bna_cb_status status)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	bfa_wc_down(&tx_mod->tx_stop_wc);
}

void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
	struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;

	if (tx_mod->stop_cbfn)
		tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
	tx_mod->stop_cbfn = NULL;
}
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
	u32 q_size;
	u32 page_count;
	struct bna_mem_info *mem_info;

	res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = sizeof(struct bna_tcb);
	mem_info->num = num_txq;

	q_size = txq_depth * BFI_TXQ_WI_SIZE;
	q_size = ALIGN(q_size, PAGE_SIZE);
	page_count = q_size >> PAGE_SHIFT;

	res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = page_count * sizeof(struct bna_dma_addr);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_KVA;
	mem_info->len = page_count * sizeof(void *);
	mem_info->num = num_txq;

	res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
	mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
	mem_info->mem_type = BNA_MEM_T_DMA;
	mem_info->len = PAGE_SIZE;
	mem_info->num = num_txq * page_count;

	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
			BNA_INTR_T_MSIX;
	res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
		struct bna_tx_config *tx_cfg,
		struct bna_tx_event_cbfn *tx_cbfn,
		struct bna_res_info *res_info, void *priv)
{
	struct bna_intr_info *intr_info;
	struct bna_tx_mod *tx_mod = &bna->tx_mod;
	struct bna_tx *tx;
	struct bna_txq *txq;
	struct list_head *qe;
	struct bna_ib_mod *ib_mod = &bna->ib_mod;
	struct bna_doorbell_qset *qset;
	struct bna_ib_config ib_config;
	int page_count;
	int page_size;
	int page_idx = 0;
	int i;
	unsigned long off;

	intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
			tx_cfg->num_txq;
	page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;

	/* Check the interrupt resources */
	if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
		return NULL;

	/* Tx */
	if (list_empty(&tx_mod->tx_free_q))
		return NULL;
	bfa_q_deq(&tx_mod->tx_free_q, &tx);
	bfa_q_qe_init(&tx->qe);
	tx->bna = bna;

	/* TxQs */
	INIT_LIST_HEAD(&tx->txq_q);
	for (i = 0; i < tx_cfg->num_txq; i++) {
		if (list_empty(&tx_mod->txq_free_q))
			goto err_return;

		bfa_q_deq(&tx_mod->txq_free_q, &txq);
		bfa_q_qe_init(&txq->qe);
		list_add_tail(&txq->qe, &tx->txq_q);
		txq->ib = NULL;
		txq->ib_seg_offset = -1;
	}

	/* IBs */
	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;

		if (intr_info->num == 1)
			txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
					intr_info->idl[0].vector);
		else
			txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
					intr_info->idl[i].vector);

		if (txq->ib == NULL)
			goto err_return;

		txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
		if (txq->ib_seg_offset == -1)
			goto err_return;

		i++;
	}

	/* Callbacks */
	tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
	tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
	/* Following callbacks are mandatory */
	tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
	tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
	tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;

	list_add_tail(&tx->qe, &tx_mod->tx_active_q);

	tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
	tx->txq_stop_wc.wc_cbarg = tx;
	tx->txq_stop_wc.wc_count = 0;

	tx->type = tx_cfg->tx_type;

	tx->flags = 0;
	if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
		switch (tx->type) {
		case BNA_TX_T_REGULAR:
			if (!(tx->bna->tx_mod.flags &
				BNA_TX_MOD_F_PORT_LOOPBACK))
				tx->flags |= BNA_TX_F_PORT_STARTED;
			break;
		case BNA_TX_T_LOOPBACK:
			if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
				tx->flags |= BNA_TX_F_PORT_STARTED;
			break;
		}
	}
	if (tx->bna->tx_mod.cee_link)
		tx->flags |= BNA_TX_F_PRIO_LOCK;

	/* TxQ */
	i = 0;
	list_for_each(qe, &tx->txq_q) {
		txq = (struct bna_txq *)qe;
		txq->priority = tx_mod->priority;
		txq->tcb = (struct bna_tcb *)
			res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
		txq->tx_packets = 0;

		/* IB config */
		ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
		ib_config.interpkt_timeo = 0; /* Not used */
		ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
		ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
					BFI_IB_CF_INT_ENABLE |
					BFI_IB_CF_COALESCING_MODE);
		bna_ib_config(txq->ib, &ib_config);

		/* TCB */
		txq->tcb->producer_index = 0;
		txq->tcb->consumer_index = 0;
		txq->tcb->hw_consumer_index = (volatile u32 *)
			((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
			(txq->ib_seg_offset * BFI_IBIDX_SIZE));
		*(txq->tcb->hw_consumer_index) = 0;
		txq->tcb->q_depth = tx_cfg->txq_depth;
		txq->tcb->unmap_q = (void *)
			res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
		qset = (struct bna_doorbell_qset *)0;
		off = (unsigned long)&qset[txq->txq_id].txq[0];
		txq->tcb->q_dbell = off +
			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
		txq->tcb->i_dbell = &txq->ib->door_bell;
		txq->tcb->intr_type = intr_info->intr_type;
		txq->tcb->intr_vector = (intr_info->num == 1) ?
					intr_info->idl[0].vector :
					intr_info->idl[i].vector;
		txq->tcb->txq = txq;
		txq->tcb->bnad = bnad;

		/* QPT, SWQPT, Pages */
		bna_txq_qpt_setup(txq, page_count, page_size,
			&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
			&res_info[BNA_TX_RES_MEM_T_PAGE].
				res_u.mem_info.mdl[page_idx]);
		txq->tcb->page_idx = page_idx;
		txq->tcb->page_count = page_count;
		page_idx += page_count;

		/* Callback to bnad for setting up TCB */
		if (tx->tcb_setup_cbfn)
			(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);

		i++;
	}

	/* TxF */
	tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;

	/* Mbox element */
	bfa_q_qe_init(&tx->mbox_qe.qe);

	bfa_fsm_set_state(tx, bna_tx_sm_stopped);

	return tx;

err_return:
	bna_tx_free(tx);
	return NULL;
}
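
/*
 * Typical TX life cycle as implemented in this file (summary):
 * bna_tx_create() pulls a tx and its txqs from bna_tx_mod, binds IBs and
 * TCBs and leaves the FSM in bna_tx_sm_stopped; bna_tx_enable() plus the
 * port-driven bna_tx_start() raise TX_E_START; bna_tx_disable() /
 * bna_tx_stop() raise TX_E_STOP and complete through the stop callbacks;
 * bna_tx_destroy() hands the TCBs back to bnad and releases the tx/txq
 * objects through bna_tx_free().
 */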
void
bna_tx_destroy(struct bna_tx *tx)
{
	/* Callback to bnad for destroying TCB */
	if (tx->tcb_destroy_cbfn) {
		struct bna_txq *txq;
		struct list_head *qe;

		list_for_each(qe, &tx->txq_q) {
			txq = (struct bna_txq *)qe;
			(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
		}
	}

	bna_tx_free(tx);
}
void
bna_tx_enable(struct bna_tx *tx)
{
	if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
		return;

	tx->flags |= BNA_TX_F_ENABLED;

	if (tx->flags & BNA_TX_F_PORT_STARTED)
		bfa_fsm_send_event(tx, TX_E_START);
}

void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
		return;
	}

	tx->stop_cbfn = cbfn;
	tx->stop_cbarg = tx->bna->bnad;

	tx->flags &= ~BNA_TX_F_ENABLED;

	bfa_fsm_send_event(tx, TX_E_STOP);
}
int
bna_tx_state_get(struct bna_tx *tx)
{
	return bfa_sm_to_state(tx_sm_table, tx->fsm);
}

void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	tx_mod->bna = bna;

	tx_mod->tx = (struct bna_tx *)
		res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
	tx_mod->txq = (struct bna_txq *)
		res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&tx_mod->tx_free_q);
	INIT_LIST_HEAD(&tx_mod->tx_active_q);

	INIT_LIST_HEAD(&tx_mod->txq_free_q);

	for (i = 0; i < BFI_MAX_TXQ; i++) {
		tx_mod->tx[i].txf.txf_id = i;
		bfa_q_qe_init(&tx_mod->tx[i].qe);
		list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);

		tx_mod->txq[i].txq_id = i;
		bfa_q_qe_init(&tx_mod->txq[i].qe);
		list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
	}

	tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
	tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
	tx_mod->tx_stop_wc.wc_count = 0;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
	struct list_head *qe;

	/* Walk the free lists (nothing further to release here) */
	list_for_each(qe, &tx_mod->tx_free_q)
		;
	list_for_each(qe, &tx_mod->txq_free_q)
		;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
	if (type == BNA_TX_T_LOOPBACK)
		tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_start(tx);
	}
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;

	tx_mod->stop_cbfn = bna_port_cb_tx_stopped;

	/*
	 * Before calling bna_tx_stop(), increment tx_stop_wc as many times
	 * as we are going to call bna_tx_stop
	 */
	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bfa_wc_up(&tx_mod->tx_stop_wc);
	}

	if (tx_mod->tx_stop_wc.wc_count == 0) {
		tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
		tx_mod->stop_cbfn = NULL;
		return;
	}

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		if (tx->type == type)
			bna_tx_stop(tx);
	}
}
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
	tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_fail(tx);
	}
}

void
bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
{
	struct bna_tx *tx;
	struct list_head *qe;

	if (prio != tx_mod->priority) {
		tx_mod->priority = prio;

		list_for_each(qe, &tx_mod->tx_active_q) {
			tx = (struct bna_tx *)qe;
			bna_tx_prio_changed(tx, prio);
		}
	}
}

void
bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
{
	struct bna_tx *tx;
	struct list_head *qe;

	tx_mod->cee_link = cee_link;

	list_for_each(qe, &tx_mod->tx_active_q) {
		tx = (struct bna_tx *)qe;
		bna_tx_cee_link_status(tx, cee_link);
	}
}