/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 */
#define bna_ib_find_free_ibidx(_mask, _pos)\
do {\
	(_pos) = 0;\
	while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
		((1 << (_pos)) & (_mask)))\
		(_pos)++;\
} while (0)

#define bna_ib_count_ibidx(_mask, _count)\
do {\
	int pos = 0;\
	(_count) = 0;\
	while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
		if ((1 << pos) & (_mask))\
			(_count) = pos + 1;\
		pos++;\
	} \
} while (0)

#define bna_ib_select_segpool(_count, _q_idx)\
do {\
	int i;\
	(_q_idx) = -1;\
	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
		if ((_count <= ibidx_pool[i].pool_entry_size)) {\
			(_q_idx) = i;\
			break;\
		} \
	} \
} while (0)
struct bna_ibidx_pool {
	int	pool_size;
	int	pool_entry_size;
};
init_ibidx_pool(ibidx_pool);
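
/*
 * Note on the interrupt-block index (ibidx) scheme used below: each IB owns
 * a bitmask (idx_mask) of the index positions it has reserved and a segment
 * (idx_seg) carved out of the host index table.  Segments come from
 * per-size free pools (ibidx_seg_pool[]); bna_ib_select_segpool() picks the
 * smallest pool whose entry size can hold the requested index count.
 */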
static struct bna_intr *
bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
		int vector)
{
	struct bna_intr *intr;
	struct list_head *qe;

	list_for_each(qe, &ib_mod->intr_active_q) {
		intr = (struct bna_intr *)qe;

		if ((intr->intr_type == intr_type) &&
			(intr->vector == vector)) {
			intr->ref_count++;
			return intr;
		}
	}

	if (list_empty(&ib_mod->intr_free_q))
		return NULL;

	bfa_q_deq(&ib_mod->intr_free_q, &intr);
	bfa_q_qe_init(&intr->qe);

	intr->ref_count = 1;
	intr->intr_type = intr_type;
	intr->vector = vector;

	list_add_tail(&intr->qe, &ib_mod->intr_active_q);

	return intr;
}
static void
bna_intr_put(struct bna_ib_mod *ib_mod,
		struct bna_intr *intr)
{
	intr->ref_count--;

	if (intr->ref_count == 0) {
		intr->ib = NULL;
		list_del(&intr->qe);
		bfa_q_qe_init(&intr->qe);
		list_add_tail(&intr->qe, &ib_mod->intr_free_q);
	}
}
void
bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;
	int j;
	int count;
	int offset;
	struct bna_doorbell_qset *qset;
	unsigned long off;

	ib_mod->bna = bna;

	ib_mod->ib = (struct bna_ib *)
		res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
	ib_mod->intr = (struct bna_intr *)
		res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
	ib_mod->idx_seg = (struct bna_ibidx_seg *)
		res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ib_mod->ib_free_q);
	INIT_LIST_HEAD(&ib_mod->intr_free_q);
	INIT_LIST_HEAD(&ib_mod->intr_active_q);

	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
		INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);

	for (i = 0; i < BFI_MAX_IB; i++) {
		ib_mod->ib[i].ib_id = i;

		ib_mod->ib[i].ib_seg_host_addr_kva =
			res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
		ib_mod->ib[i].ib_seg_host_addr.lsb =
			res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
		ib_mod->ib[i].ib_seg_host_addr.msb =
			res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;

		qset = (struct bna_doorbell_qset *)0;
		off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
					* (0x20 >> 2)]);
		ib_mod->ib[i].door_bell.doorbell_addr = off +
			BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);

		bfa_q_qe_init(&ib_mod->ib[i].qe);
		list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);

		bfa_q_qe_init(&ib_mod->intr[i].qe);
		list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
	}

	/* Carve the index segment pools out of the idx_seg array */
	count = 0;
	offset = 0;
	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
		for (j = 0; j < ibidx_pool[i].pool_size; j++) {
			bfa_q_qe_init(&ib_mod->idx_seg[count]);
			ib_mod->idx_seg[count].ib_seg_size =
					ibidx_pool[i].pool_entry_size;
			ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
			list_add_tail(&ib_mod->idx_seg[count].qe,
				&ib_mod->ibidx_seg_pool[i]);
			count++;
			offset += ibidx_pool[i].pool_entry_size;
		}
	}
}
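
/*
 * The doorbell address computed above is derived by treating a NULL
 * bna_doorbell_qset pointer as the base of the doorbell page, taking the
 * byte offset of the per-IB doorbell slot, and adding the BAR-relative
 * doorbell base for this PCI device.  The same "offset of a field from a
 * zero pointer" idiom is used for the other on-chip memory blocks below.
 */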
void
bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
{
	int i;
	int j;
	struct list_head *qe;

	i = 0;
	list_for_each(qe, &ib_mod->ib_free_q)
		i++;

	i = 0;
	list_for_each(qe, &ib_mod->intr_free_q)
		i++;

	for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
		j = 0;
		list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
			j++;
	}

	ib_mod->bna = NULL;
}
static struct bna_ib *
bna_ib_get(struct bna_ib_mod *ib_mod,
		enum bna_intr_type intr_type,
		int vector)
{
	struct bna_ib *ib;
	struct bna_intr *intr;

	if (intr_type == BNA_INTR_T_INTX)
		vector = (1 << vector);

	intr = bna_intr_get(ib_mod, intr_type, vector);
	if (intr == NULL)
		return NULL;

	if (intr->ib) {
		if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
			bna_intr_put(ib_mod, intr);
			return NULL;
		}
		intr->ib->ref_count++;
		return intr->ib;
	}

	if (list_empty(&ib_mod->ib_free_q)) {
		bna_intr_put(ib_mod, intr);
		return NULL;
	}

	bfa_q_deq(&ib_mod->ib_free_q, &ib);
	bfa_q_qe_init(&ib->qe);

	ib->ref_count = 1;
	ib->intr = intr;
	ib->idx_mask = 0;
	ib->idx_seg = NULL;
	ib->start_count = 0;

	intr->ib = ib;

	ib->bna = ib_mod->bna;

	return ib;
}
static void
bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
{
	bna_intr_put(ib_mod, ib->intr);

	ib->ref_count--;

	if (ib->ref_count == 0) {
		ib->intr = NULL;
		ib->bna = NULL;
		list_add_tail(&ib->qe, &ib_mod->ib_free_q);
	}
}
/* Returns index offset - starting from 0 */
static int
bna_ib_reserve_idx(struct bna_ib *ib)
{
	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
	struct bna_ibidx_seg *idx_seg;
	int idx;
	int num_idx;
	int q_idx;

	/* Find the first free index position */
	bna_ib_find_free_ibidx(ib->idx_mask, idx);
	if (idx == BFI_IBIDX_MAX_SEGSIZE)
		return -1;

	/*
	 * Calculate the total number of indexes held by this IB,
	 * including the index newly reserved above.
	 */
	bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);

	/* See if there is a free space in the index segment held by this IB */
	if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
		ib->idx_mask |= (1 << idx);
		return idx;
	}

	/* Allocate a new segment */
	bna_ib_select_segpool(num_idx, q_idx);
	while (1) {
		if (q_idx == BFI_IBIDX_TOTAL_POOLS)
			return -1;
		if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
			break;
		q_idx++;
	}
	bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
	bfa_q_qe_init(&idx_seg->qe);

	/* Free the old segment */
	if (ib->idx_seg) {
		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
		list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
	}

	ib->idx_seg = idx_seg;

	ib->idx_mask |= (1 << idx);

	return idx;
}
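
/*
 * Example (illustrative, assuming a 1-entry pool followed by a 2-entry
 * pool): an IB holding index 0 in a 1-entry segment that reserves a second
 * index no longer fits (num_idx == 2 > ib_seg_size == 1), so a segment is
 * taken from the 2-entry pool and the old 1-entry segment is returned to
 * its pool before the new index bit is set in idx_mask.
 */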
static void
bna_ib_release_idx(struct bna_ib *ib, int idx)
{
	struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
	struct bna_ibidx_seg *idx_seg;
	int num_idx;
	int cur_q_idx;
	int new_q_idx;

	ib->idx_mask &= ~(1 << idx);

	bna_ib_count_ibidx(ib->idx_mask, num_idx);

	/*
	 * Free the segment, if there are no more indexes in the segment
	 * held by this IB
	 */
	if (!num_idx) {
		bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
		list_add_tail(&ib->idx_seg->qe,
			&ib_mod->ibidx_seg_pool[cur_q_idx]);
		ib->idx_seg = NULL;
		return;
	}

	/* See if we can move to a smaller segment */
	bna_ib_select_segpool(num_idx, new_q_idx);
	bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
	while (new_q_idx < cur_q_idx) {
		if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
			break;
		new_q_idx++;
	}
	if (new_q_idx < cur_q_idx) {
		/* Select the new smaller segment */
		bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
		bfa_q_qe_init(&idx_seg->qe);
		/* Free the old segment */
		list_add_tail(&ib->idx_seg->qe,
			&ib_mod->ibidx_seg_pool[cur_q_idx]);
		ib->idx_seg = idx_seg;
	}
}
static int
bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
{
	ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
	ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
	ib->ib_config.interpkt_count = ib_config->interpkt_count;
	ib->ib_config.ctrl_flags = ib_config->ctrl_flags;

	ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
	if (ib->intr->intr_type == BNA_INTR_T_MSIX)
		ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;

	return 0;
}
static void
bna_ib_start(struct bna_ib *ib)
{
	struct bna_ib_blk_mem ib_cfg;
	struct bna_ib_blk_mem *ib_mem;
	u32 pg_num;
	u32 intx_mask;
	int i;
	void __iomem *base_addr;
	unsigned long off;

	ib->start_count++;

	if (ib->start_count > 1)
		return;

	ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
	ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);

	ib_cfg.clsc_n_ctrl_n_msix = (((u32)
				ib->ib_config.coalescing_timeo << 16) |
				((u32)ib->ib_config.ctrl_flags << 8) |
				(ib->intr->vector));
	ib_cfg.ipkt_n_ent_n_idxof =
				((u32)
				 (ib->ib_config.interpkt_timeo & 0xf) << 16) |
				((u32)ib->idx_seg->ib_seg_size << 8) |
				(ib->idx_seg->ib_idx_tbl_offset);
	ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
				ib->ib_config.interpkt_count << 24);

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
				HQM_IB_RAM_BASE_OFFSET);
	writel(pg_num, ib->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
					HQM_IB_RAM_BASE_OFFSET);

	ib_mem = (struct bna_ib_blk_mem *)0;
	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
	writel(htonl(ib_cfg.host_addr_lo), base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
	writel(htonl(ib_cfg.host_addr_hi), base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
	writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
	writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);

	off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
	writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);

	ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
				(u32)ib->ib_config.coalescing_timeo, 0);

	pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
				HQM_INDX_TBL_RAM_BASE_OFFSET);
	writel(pg_num, ib->bna->regs.page_addr);

	base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
				HQM_INDX_TBL_RAM_BASE_OFFSET);
	for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
		off = (unsigned long)
		((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
		writel(0, base_addr + off);
	}

	if (ib->intr->intr_type == BNA_INTR_T_INTX) {
		bna_intx_disable(ib->bna, intx_mask);
		intx_mask &= ~(ib->intr->vector);
		bna_intx_enable(ib->bna, intx_mask);
	}
}
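
/*
 * Programming pattern used above (and by the RXQ/CQ setup later in this
 * file): first select the on-chip RAM page by writing the page number to
 * regs.page_addr, then write each field of the staged config structure to
 * its byte offset within the mapped BAR region.
 */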
static void
bna_ib_stop(struct bna_ib *ib)
{
	u32 intx_mask;

	ib->start_count--;

	if (ib->start_count == 0) {
		writel(BNA_DOORBELL_IB_INT_DISABLE,
			ib->door_bell.doorbell_addr);
		if (ib->intr->intr_type == BNA_INTR_T_INTX) {
			bna_intx_disable(ib->bna, intx_mask);
			intx_mask |= (ib->intr->vector);
			bna_intx_enable(ib->bna, intx_mask);
		}
	}
}
static void
bna_ib_fail(struct bna_ib *ib)
{
	ib->start_count = 0;
}
static void rxf_enable(struct bna_rxf *rxf);
static void rxf_disable(struct bna_rxf *rxf);
static void __rxf_config_set(struct bna_rxf *rxf);
static void __rxf_rit_set(struct bna_rxf *rxf);
static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
static int rxf_process_packet_filter(struct bna_rxf *rxf);
static int rxf_clear_packet_filter(struct bna_rxf *rxf);
static void rxf_reset_packet_filter(struct bna_rxf *rxf);
static void rxf_cb_enabled(void *arg, int status);
static void rxf_cb_disabled(void *arg, int status);
static void bna_rxf_cb_stats_cleared(void *arg, int status);
static void __rxf_enable(struct bna_rxf *rxf);
static void __rxf_disable(struct bna_rxf *rxf);
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
			enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
			enum bna_rxf_event);

static struct bfa_sm_table rxf_sm_table[] = {
	{BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
	{BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
	{BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
	{BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
	{BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
	{BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
	{BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
	{BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
	{BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
};
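
/*
 * bna_rxf_state_get() walks rxf_sm_table with bfa_sm_to_state() to translate
 * the current FSM handler (rxf->fsm) back into the BNA_RXF_* state value
 * reported to the bnad layer.
 */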
526 bna_rxf_sm_stopped_entry(struct bna_rxf
*rxf
)
528 call_rxf_stop_cbfn(rxf
, BNA_CB_SUCCESS
);
532 bna_rxf_sm_stopped(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
536 bfa_fsm_set_state(rxf
, bna_rxf_sm_start_wait
);
540 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
547 case RXF_E_CAM_FLTR_MOD
:
548 call_rxf_cam_fltr_cbfn(rxf
, BNA_CB_SUCCESS
);
553 case RXF_E_CAM_FLTR_RESP
:
555 * These events are received due to flushing of mbox
562 rxf
->rxf_oper_state
= BNA_RXF_OPER_STATE_PAUSED
;
563 call_rxf_pause_cbfn(rxf
, BNA_CB_SUCCESS
);
567 rxf
->rxf_oper_state
= BNA_RXF_OPER_STATE_RUNNING
;
568 call_rxf_resume_cbfn(rxf
, BNA_CB_SUCCESS
);
572 bfa_sm_fault(rxf
->rx
->bna
, event
);
577 bna_rxf_sm_start_wait_entry(struct bna_rxf
*rxf
)
579 __rxf_config_set(rxf
);
585 bna_rxf_sm_start_wait(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
590 * STOP is originated from bnad. When this happens,
591 * it can not be waiting for filter update
593 call_rxf_start_cbfn(rxf
, BNA_CB_INTERRUPT
);
594 bfa_fsm_set_state(rxf
, bna_rxf_sm_stop_wait
);
598 call_rxf_cam_fltr_cbfn(rxf
, BNA_CB_SUCCESS
);
599 call_rxf_start_cbfn(rxf
, BNA_CB_FAIL
);
600 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
603 case RXF_E_CAM_FLTR_MOD
:
609 * Force rxf_process_filter() to go through initial
612 if ((rxf
->ucast_active_mac
!= NULL
) &&
613 (rxf
->ucast_pending_set
== 0))
614 rxf
->ucast_pending_set
= 1;
616 if (rxf
->rss_status
== BNA_STATUS_T_ENABLED
)
617 rxf
->rxf_flags
|= BNA_RXF_FL_RSS_CONFIG_PENDING
;
619 rxf
->rxf_flags
|= BNA_RXF_FL_VLAN_CONFIG_PENDING
;
621 bfa_fsm_set_state(rxf
, bna_rxf_sm_cam_fltr_mod_wait
);
626 rxf
->rxf_flags
|= BNA_RXF_FL_OPERSTATE_CHANGED
;
630 bfa_sm_fault(rxf
->rx
->bna
, event
);
635 bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf
*rxf
)
637 if (!rxf_process_packet_filter(rxf
)) {
638 /* No more pending CAM entries to update */
639 bfa_fsm_set_state(rxf
, bna_rxf_sm_started
);
644 bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
649 * STOP is originated from bnad. When this happens,
650 * it can not be waiting for filter update
652 call_rxf_start_cbfn(rxf
, BNA_CB_INTERRUPT
);
653 bfa_fsm_set_state(rxf
, bna_rxf_sm_cam_fltr_clr_wait
);
657 rxf_reset_packet_filter(rxf
);
658 call_rxf_cam_fltr_cbfn(rxf
, BNA_CB_SUCCESS
);
659 call_rxf_start_cbfn(rxf
, BNA_CB_FAIL
);
660 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
663 case RXF_E_CAM_FLTR_MOD
:
667 case RXF_E_CAM_FLTR_RESP
:
668 if (!rxf_process_packet_filter(rxf
)) {
669 /* No more pending CAM entries to update */
670 call_rxf_cam_fltr_cbfn(rxf
, BNA_CB_SUCCESS
);
671 bfa_fsm_set_state(rxf
, bna_rxf_sm_started
);
677 rxf
->rxf_flags
|= BNA_RXF_FL_OPERSTATE_CHANGED
;
681 bfa_sm_fault(rxf
->rx
->bna
, event
);
686 bna_rxf_sm_started_entry(struct bna_rxf
*rxf
)
688 call_rxf_start_cbfn(rxf
, BNA_CB_SUCCESS
);
690 if (rxf
->rxf_flags
& BNA_RXF_FL_OPERSTATE_CHANGED
) {
691 if (rxf
->rxf_oper_state
== BNA_RXF_OPER_STATE_PAUSED
)
692 bfa_fsm_send_event(rxf
, RXF_E_PAUSE
);
694 bfa_fsm_send_event(rxf
, RXF_E_RESUME
);
700 bna_rxf_sm_started(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
704 bfa_fsm_set_state(rxf
, bna_rxf_sm_cam_fltr_clr_wait
);
705 /* Hack to get FSM start clearing CAM entries */
706 bfa_fsm_send_event(rxf
, RXF_E_CAM_FLTR_RESP
);
710 rxf_reset_packet_filter(rxf
);
711 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
714 case RXF_E_CAM_FLTR_MOD
:
715 bfa_fsm_set_state(rxf
, bna_rxf_sm_cam_fltr_mod_wait
);
719 bfa_fsm_set_state(rxf
, bna_rxf_sm_pause_wait
);
723 bfa_fsm_set_state(rxf
, bna_rxf_sm_resume_wait
);
727 bfa_sm_fault(rxf
->rx
->bna
, event
);
732 bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf
*rxf
)
735 * Note: Do not add rxf_clear_packet_filter here.
736 * It will overstep mbox when this transition happens:
737 * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
742 bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
747 * FSM was in the process of stopping, initiated by
748 * bnad. When this happens, no one can be waiting for
749 * start or filter update
751 rxf_reset_packet_filter(rxf
);
752 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
755 case RXF_E_CAM_FLTR_RESP
:
756 if (!rxf_clear_packet_filter(rxf
)) {
757 /* No more pending CAM entries to clear */
758 bfa_fsm_set_state(rxf
, bna_rxf_sm_stop_wait
);
764 bfa_sm_fault(rxf
->rx
->bna
, event
);
769 bna_rxf_sm_stop_wait_entry(struct bna_rxf
*rxf
)
772 * NOTE: Do not add rxf_disable here.
773 * It will overstep mbox when this transition happens:
774 * start_wait -> stop_wait on RXF_E_STOP event
779 bna_rxf_sm_stop_wait(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
784 * FSM was in the process of stopping, initiated by
785 * bnad. When this happens, no one can be waiting for
786 * start or filter update
788 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
793 * This event is received due to abrupt transition from
794 * bna_rxf_sm_start_wait state on receiving
802 * FSM was in the process of stopping, initiated by
803 * bnad. When this happens, no one can be waiting for
804 * start or filter update
806 bfa_fsm_set_state(rxf
, bna_rxf_sm_stat_clr_wait
);
810 rxf
->rxf_oper_state
= BNA_RXF_OPER_STATE_PAUSED
;
814 rxf
->rxf_oper_state
= BNA_RXF_OPER_STATE_RUNNING
;
818 bfa_sm_fault(rxf
->rx
->bna
, event
);
823 bna_rxf_sm_pause_wait_entry(struct bna_rxf
*rxf
)
826 ~(BNA_RXF_FL_OPERSTATE_CHANGED
| BNA_RXF_FL_RXF_ENABLED
);
831 bna_rxf_sm_pause_wait(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
836 * FSM was in the process of disabling rxf, initiated by
839 call_rxf_pause_cbfn(rxf
, BNA_CB_FAIL
);
840 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
844 rxf
->rxf_oper_state
= BNA_RXF_OPER_STATE_PAUSED
;
845 call_rxf_pause_cbfn(rxf
, BNA_CB_SUCCESS
);
846 bfa_fsm_set_state(rxf
, bna_rxf_sm_started
);
850 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
851 * any other event during these states
854 bfa_sm_fault(rxf
->rx
->bna
, event
);
859 bna_rxf_sm_resume_wait_entry(struct bna_rxf
*rxf
)
861 rxf
->rxf_flags
&= ~(BNA_RXF_FL_OPERSTATE_CHANGED
);
862 rxf
->rxf_flags
|= BNA_RXF_FL_RXF_ENABLED
;
867 bna_rxf_sm_resume_wait(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
872 * FSM was in the process of disabling rxf, initiated by
875 call_rxf_resume_cbfn(rxf
, BNA_CB_FAIL
);
876 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
880 rxf
->rxf_oper_state
= BNA_RXF_OPER_STATE_RUNNING
;
881 call_rxf_resume_cbfn(rxf
, BNA_CB_SUCCESS
);
882 bfa_fsm_set_state(rxf
, bna_rxf_sm_started
);
886 * Since PAUSE/RESUME can only be sent by bnad, we don't expect
887 * any other event during these states
890 bfa_sm_fault(rxf
->rx
->bna
, event
);
895 bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf
*rxf
)
897 __bna_rxf_stat_clr(rxf
);
901 bna_rxf_sm_stat_clr_wait(struct bna_rxf
*rxf
, enum bna_rxf_event event
)
905 case RXF_E_STAT_CLEARED
:
906 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
910 bfa_sm_fault(rxf
->rx
->bna
, event
);
static void
__rxf_enable(struct bna_rxf *rxf)
{
	struct bfi_ll_rxf_multi_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);
	ll_req.enable = 1;

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			rxf_cb_enabled, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
static void
__rxf_disable(struct bna_rxf *rxf)
{
	struct bfi_ll_rxf_multi_req ll_req;
	u32 bm[2] = {0, 0};

	if (rxf->rxf_id < 32)
		bm[0] = 1 << rxf->rxf_id;
	else
		bm[1] = 1 << (rxf->rxf_id - 32);

	bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
	ll_req.rxf_id_mask[0] = htonl(bm[0]);
	ll_req.rxf_id_mask[1] = htonl(bm[1]);
	ll_req.enable = 0;

	bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
			rxf_cb_disabled, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
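
/*
 * The firmware addresses receive functions through a 64-bit bitmap split
 * into two 32-bit words, so __rxf_enable()/__rxf_disable() set bit (rxf_id)
 * in word 0 for ids below 32 and bit (rxf_id - 32) in word 1 otherwise,
 * then hand the request to the mailbox queue.
 */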
959 __rxf_config_set(struct bna_rxf
*rxf
)
962 struct bna_rss_mem
*rss_mem
;
963 struct bna_rx_fndb_ram
*rx_fndb_ram
;
964 struct bna
*bna
= rxf
->rx
->bna
;
965 void __iomem
*base_addr
;
968 base_addr
= BNA_GET_MEM_BASE_ADDR(bna
->pcidev
.pci_bar_kva
,
969 RSS_TABLE_BASE_OFFSET
);
971 rss_mem
= (struct bna_rss_mem
*)0;
973 /* Configure RSS if required */
974 if (rxf
->ctrl_flags
& BNA_RXF_CF_RSS_ENABLE
) {
975 /* configure RSS Table */
976 writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM
+
977 bna
->port_num
, RSS_TABLE_BASE_OFFSET
),
978 bna
->regs
.page_addr
);
980 /* temporarily disable RSS, while hash value is written */
981 off
= (unsigned long)&rss_mem
[0].type_n_hash
;
982 writel(0, base_addr
+ off
);
984 for (i
= 0; i
< BFI_RSS_HASH_KEY_LEN
; i
++) {
985 off
= (unsigned long)
986 &rss_mem
[0].hash_key
[(BFI_RSS_HASH_KEY_LEN
- 1) - i
];
987 writel(htonl(rxf
->rss_cfg
.toeplitz_hash_key
[i
]),
991 off
= (unsigned long)&rss_mem
[0].type_n_hash
;
992 writel(rxf
->rss_cfg
.hash_type
| rxf
->rss_cfg
.hash_mask
,
997 writel(BNA_GET_PAGE_NUM(
998 LUT0_MEM_BLK_BASE_PG_NUM
+ (bna
->port_num
* 2),
999 RX_FNDB_RAM_BASE_OFFSET
),
1000 bna
->regs
.page_addr
);
1002 base_addr
= BNA_GET_MEM_BASE_ADDR(bna
->pcidev
.pci_bar_kva
,
1003 RX_FNDB_RAM_BASE_OFFSET
);
1005 rx_fndb_ram
= (struct bna_rx_fndb_ram
*)0;
1007 /* We always use RSS table 0 */
1008 off
= (unsigned long)&rx_fndb_ram
[rxf
->rxf_id
].rss_prop
;
1009 writel(rxf
->ctrl_flags
& BNA_RXF_CF_RSS_ENABLE
,
1012 /* small large buffer enable/disable */
1013 off
= (unsigned long)&rx_fndb_ram
[rxf
->rxf_id
].size_routing_props
;
1014 writel((rxf
->ctrl_flags
& BNA_RXF_CF_SM_LG_RXQ
) | 0x80,
1017 /* RIT offset, HDS forced offset, multicast RxQ Id */
1018 off
= (unsigned long)&rx_fndb_ram
[rxf
->rxf_id
].rit_hds_mcastq
;
1019 writel((rxf
->rit_segment
->rit_offset
<< 16) |
1020 (rxf
->forced_offset
<< 8) |
1021 (rxf
->hds_cfg
.hdr_type
& BNA_HDS_FORCED
) | rxf
->mcast_rxq_id
,
1025 * default vlan tag, default function enable, strip vlan bytes,
1026 * HDS type, header size
1029 off
= (unsigned long)&rx_fndb_ram
[rxf
->rxf_id
].control_flags
;
1030 writel(((u32
)rxf
->default_vlan_tag
<< 16) |
1032 (BNA_RXF_CF_DEFAULT_VLAN
|
1033 BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE
|
1034 BNA_RXF_CF_VLAN_STRIP
)) |
1035 (rxf
->hds_cfg
.hdr_type
& ~BNA_HDS_FORCED
) |
1036 rxf
->hds_cfg
.header_size
,
1041 __rxf_vlan_filter_set(struct bna_rxf
*rxf
, enum bna_status status
)
1043 struct bna
*bna
= rxf
->rx
->bna
;
1046 writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM
+
1047 (bna
->port_num
* 2), VLAN_RAM_BASE_OFFSET
),
1048 bna
->regs
.page_addr
);
1050 if (status
== BNA_STATUS_T_ENABLED
) {
1051 /* enable VLAN filtering on this function */
1052 for (i
= 0; i
<= BFI_MAX_VLAN
/ 32; i
++) {
1053 writel(rxf
->vlan_filter_table
[i
],
1054 BNA_GET_VLAN_MEM_ENTRY_ADDR
1055 (bna
->pcidev
.pci_bar_kva
, rxf
->rxf_id
,
1059 /* disable VLAN filtering on this function */
1060 for (i
= 0; i
<= BFI_MAX_VLAN
/ 32; i
++) {
1062 BNA_GET_VLAN_MEM_ENTRY_ADDR
1063 (bna
->pcidev
.pci_bar_kva
, rxf
->rxf_id
,
1070 __rxf_rit_set(struct bna_rxf
*rxf
)
1072 struct bna
*bna
= rxf
->rx
->bna
;
1073 struct bna_rit_mem
*rit_mem
;
1075 void __iomem
*base_addr
;
1078 base_addr
= BNA_GET_MEM_BASE_ADDR(bna
->pcidev
.pci_bar_kva
,
1079 FUNCTION_TO_RXQ_TRANSLATE
);
1081 rit_mem
= (struct bna_rit_mem
*)0;
1083 writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM
+ bna
->port_num
,
1084 FUNCTION_TO_RXQ_TRANSLATE
),
1085 bna
->regs
.page_addr
);
1087 for (i
= 0; i
< rxf
->rit_segment
->rit_size
; i
++) {
1088 off
= (unsigned long)&rit_mem
[i
+ rxf
->rit_segment
->rit_offset
];
1089 writel(rxf
->rit_segment
->rit
[i
].large_rxq_id
<< 6 |
1090 rxf
->rit_segment
->rit
[i
].small_rxq_id
,
1096 __bna_rxf_stat_clr(struct bna_rxf
*rxf
)
1098 struct bfi_ll_stats_req ll_req
;
1101 if (rxf
->rxf_id
< 32)
1102 bm
[0] = 1 << rxf
->rxf_id
;
1104 bm
[1] = 1 << (rxf
->rxf_id
- 32);
1106 bfi_h2i_set(ll_req
.mh
, BFI_MC_LL
, BFI_LL_H2I_STATS_CLEAR_REQ
, 0);
1107 ll_req
.stats_mask
= 0;
1108 ll_req
.txf_id_mask
[0] = 0;
1109 ll_req
.txf_id_mask
[1] = 0;
1111 ll_req
.rxf_id_mask
[0] = htonl(bm
[0]);
1112 ll_req
.rxf_id_mask
[1] = htonl(bm
[1]);
1114 bna_mbox_qe_fill(&rxf
->mbox_qe
, &ll_req
, sizeof(ll_req
),
1115 bna_rxf_cb_stats_cleared
, rxf
);
1116 bna_mbox_send(rxf
->rx
->bna
, &rxf
->mbox_qe
);
1120 rxf_enable(struct bna_rxf
*rxf
)
1122 if (rxf
->rxf_oper_state
== BNA_RXF_OPER_STATE_PAUSED
)
1123 bfa_fsm_send_event(rxf
, RXF_E_STARTED
);
1125 rxf
->rxf_flags
|= BNA_RXF_FL_RXF_ENABLED
;
1131 rxf_cb_enabled(void *arg
, int status
)
1133 struct bna_rxf
*rxf
= (struct bna_rxf
*)arg
;
1135 bfa_q_qe_init(&rxf
->mbox_qe
.qe
);
1136 bfa_fsm_send_event(rxf
, RXF_E_STARTED
);
1140 rxf_disable(struct bna_rxf
*rxf
)
1142 if (rxf
->rxf_oper_state
== BNA_RXF_OPER_STATE_PAUSED
)
1143 bfa_fsm_send_event(rxf
, RXF_E_STOPPED
);
1145 rxf
->rxf_flags
&= ~BNA_RXF_FL_RXF_ENABLED
;
1150 rxf_cb_disabled(void *arg
, int status
)
1152 struct bna_rxf
*rxf
= (struct bna_rxf
*)arg
;
1154 bfa_q_qe_init(&rxf
->mbox_qe
.qe
);
1155 bfa_fsm_send_event(rxf
, RXF_E_STOPPED
);
1159 rxf_cb_cam_fltr_mbox_cmd(void *arg
, int status
)
1161 struct bna_rxf
*rxf
= (struct bna_rxf
*)arg
;
1163 bfa_q_qe_init(&rxf
->mbox_qe
.qe
);
1165 bfa_fsm_send_event(rxf
, RXF_E_CAM_FLTR_RESP
);
1169 bna_rxf_cb_stats_cleared(void *arg
, int status
)
1171 struct bna_rxf
*rxf
= (struct bna_rxf
*)arg
;
1173 bfa_q_qe_init(&rxf
->mbox_qe
.qe
);
1174 bfa_fsm_send_event(rxf
, RXF_E_STAT_CLEARED
);
static void
rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
		const struct bna_mac *mac_addr)
{
	struct bfi_ll_mac_addr_req req;

	bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);

	req.rxf_id = rxf->rxf_id;
	memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);

	bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
				rxf_cb_cam_fltr_mbox_cmd, rxf);

	bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
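
/*
 * rxf_cam_mbox_cmd() is the single mailbox entry point for CAM updates: the
 * ucast set and mcast add/del paths below all funnel through it with the
 * appropriate BFI_LL_H2I_MAC_* command, and completion comes back through
 * rxf_cb_cam_fltr_mbox_cmd() as RXF_E_CAM_FLTR_RESP.
 */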
1195 rxf_process_packet_filter_mcast(struct bna_rxf
*rxf
)
1197 struct bna_mac
*mac
= NULL
;
1198 struct list_head
*qe
;
1200 /* Add multicast entries */
1201 if (!list_empty(&rxf
->mcast_pending_add_q
)) {
1202 bfa_q_deq(&rxf
->mcast_pending_add_q
, &qe
);
1204 mac
= (struct bna_mac
*)qe
;
1205 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_MCAST_ADD_REQ
, mac
);
1206 list_add_tail(&mac
->qe
, &rxf
->mcast_active_q
);
1210 /* Delete multicast entries previousely added */
1211 if (!list_empty(&rxf
->mcast_pending_del_q
)) {
1212 bfa_q_deq(&rxf
->mcast_pending_del_q
, &qe
);
1214 mac
= (struct bna_mac
*)qe
;
1215 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_MCAST_DEL_REQ
, mac
);
1216 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
1224 rxf_process_packet_filter_vlan(struct bna_rxf
*rxf
)
1226 /* Apply the VLAN filter */
1227 if (rxf
->rxf_flags
& BNA_RXF_FL_VLAN_CONFIG_PENDING
) {
1228 rxf
->rxf_flags
&= ~BNA_RXF_FL_VLAN_CONFIG_PENDING
;
1229 if (!(rxf
->rxmode_active
& BNA_RXMODE_PROMISC
))
1230 __rxf_vlan_filter_set(rxf
, rxf
->vlan_filter_status
);
1233 /* Apply RSS configuration */
1234 if (rxf
->rxf_flags
& BNA_RXF_FL_RSS_CONFIG_PENDING
) {
1235 rxf
->rxf_flags
&= ~BNA_RXF_FL_RSS_CONFIG_PENDING
;
1236 if (rxf
->rss_status
== BNA_STATUS_T_DISABLED
) {
1237 /* RSS is being disabled */
1238 rxf
->ctrl_flags
&= ~BNA_RXF_CF_RSS_ENABLE
;
1240 __rxf_config_set(rxf
);
1242 /* RSS is being enabled or reconfigured */
1243 rxf
->ctrl_flags
|= BNA_RXF_CF_RSS_ENABLE
;
1245 __rxf_config_set(rxf
);
1253 * Processes pending ucast, mcast entry addition/deletion and issues mailbox
1254 * command. Also processes pending filter configuration - promiscuous mode,
1255 * default mode, allmutli mode and issues mailbox command or directly applies
1259 rxf_process_packet_filter(struct bna_rxf
*rxf
)
1261 /* Set the default MAC first */
1262 if (rxf
->ucast_pending_set
> 0) {
1263 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_UCAST_SET_REQ
,
1264 rxf
->ucast_active_mac
);
1265 rxf
->ucast_pending_set
--;
1269 if (rxf_process_packet_filter_ucast(rxf
))
1272 if (rxf_process_packet_filter_mcast(rxf
))
1275 if (rxf_process_packet_filter_promisc(rxf
))
1278 if (rxf_process_packet_filter_allmulti(rxf
))
1281 if (rxf_process_packet_filter_vlan(rxf
))
1288 rxf_clear_packet_filter_mcast(struct bna_rxf
*rxf
)
1290 struct bna_mac
*mac
= NULL
;
1291 struct list_head
*qe
;
1293 /* 3. delete pending mcast entries */
1294 if (!list_empty(&rxf
->mcast_pending_del_q
)) {
1295 bfa_q_deq(&rxf
->mcast_pending_del_q
, &qe
);
1297 mac
= (struct bna_mac
*)qe
;
1298 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_MCAST_DEL_REQ
, mac
);
1299 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
1303 /* 4. clear active mcast entries; move them to pending_add_q */
1304 if (!list_empty(&rxf
->mcast_active_q
)) {
1305 bfa_q_deq(&rxf
->mcast_active_q
, &qe
);
1307 mac
= (struct bna_mac
*)qe
;
1308 rxf_cam_mbox_cmd(rxf
, BFI_LL_H2I_MAC_MCAST_DEL_REQ
, mac
);
1309 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_add_q
);
1317 * In the rxf stop path, processes pending ucast/mcast delete queue and issues
1318 * the mailbox command. Moves the active ucast/mcast entries to pending add q,
1319 * so that they are added to CAM again in the rxf start path. Moves the current
1320 * filter settings - promiscuous, default, allmutli - to pending filter
1324 rxf_clear_packet_filter(struct bna_rxf
*rxf
)
1326 if (rxf_clear_packet_filter_ucast(rxf
))
1329 if (rxf_clear_packet_filter_mcast(rxf
))
1332 /* 5. clear active default MAC in the CAM */
1333 if (rxf
->ucast_pending_set
> 0)
1334 rxf
->ucast_pending_set
= 0;
1336 if (rxf_clear_packet_filter_promisc(rxf
))
1339 if (rxf_clear_packet_filter_allmulti(rxf
))
1346 rxf_reset_packet_filter_mcast(struct bna_rxf
*rxf
)
1348 struct list_head
*qe
;
1349 struct bna_mac
*mac
;
1351 /* 3. Move active mcast entries to pending_add_q */
1352 while (!list_empty(&rxf
->mcast_active_q
)) {
1353 bfa_q_deq(&rxf
->mcast_active_q
, &qe
);
1355 list_add_tail(qe
, &rxf
->mcast_pending_add_q
);
1358 /* 4. Throw away delete pending mcast entries */
1359 while (!list_empty(&rxf
->mcast_pending_del_q
)) {
1360 bfa_q_deq(&rxf
->mcast_pending_del_q
, &qe
);
1362 mac
= (struct bna_mac
*)qe
;
1363 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
1368 * In the rxf fail path, throws away the ucast/mcast entries pending for
1369 * deletion, moves all active ucast/mcast entries to pending queue so that
1370 * they are added back to CAM in the rxf start path. Also moves the current
1371 * filter configuration to pending filter configuration.
1374 rxf_reset_packet_filter(struct bna_rxf
*rxf
)
1376 rxf_reset_packet_filter_ucast(rxf
);
1378 rxf_reset_packet_filter_mcast(rxf
);
1380 /* 5. Turn off ucast set flag */
1381 rxf
->ucast_pending_set
= 0;
1383 rxf_reset_packet_filter_promisc(rxf
);
1385 rxf_reset_packet_filter_allmulti(rxf
);
1389 bna_rxf_init(struct bna_rxf
*rxf
,
1391 struct bna_rx_config
*q_config
)
1393 struct list_head
*qe
;
1394 struct bna_rxp
*rxp
;
1396 /* rxf_id is initialized during rx_mod init */
1399 INIT_LIST_HEAD(&rxf
->ucast_pending_add_q
);
1400 INIT_LIST_HEAD(&rxf
->ucast_pending_del_q
);
1401 rxf
->ucast_pending_set
= 0;
1402 INIT_LIST_HEAD(&rxf
->ucast_active_q
);
1403 rxf
->ucast_active_mac
= NULL
;
1405 INIT_LIST_HEAD(&rxf
->mcast_pending_add_q
);
1406 INIT_LIST_HEAD(&rxf
->mcast_pending_del_q
);
1407 INIT_LIST_HEAD(&rxf
->mcast_active_q
);
1409 bfa_q_qe_init(&rxf
->mbox_qe
.qe
);
1411 if (q_config
->vlan_strip_status
== BNA_STATUS_T_ENABLED
)
1412 rxf
->ctrl_flags
|= BNA_RXF_CF_VLAN_STRIP
;
1414 rxf
->rxf_oper_state
= (q_config
->paused
) ?
1415 BNA_RXF_OPER_STATE_PAUSED
: BNA_RXF_OPER_STATE_RUNNING
;
1417 bna_rxf_adv_init(rxf
, rx
, q_config
);
1419 rxf
->rit_segment
= bna_rit_mod_seg_get(&rxf
->rx
->bna
->rit_mod
,
1420 q_config
->num_paths
);
1422 list_for_each(qe
, &rx
->rxp_q
) {
1423 rxp
= (struct bna_rxp
*)qe
;
1424 if (q_config
->rxp_type
== BNA_RXP_SINGLE
)
1425 rxf
->mcast_rxq_id
= rxp
->rxq
.single
.only
->rxq_id
;
1427 rxf
->mcast_rxq_id
= rxp
->rxq
.slr
.large
->rxq_id
;
1431 rxf
->vlan_filter_status
= BNA_STATUS_T_DISABLED
;
1432 memset(rxf
->vlan_filter_table
, 0,
1433 (sizeof(u32
) * ((BFI_MAX_VLAN
+ 1) / 32)));
1435 /* Set up VLAN 0 for pure priority tagged packets */
1436 rxf
->vlan_filter_table
[0] |= 1;
1438 bfa_fsm_set_state(rxf
, bna_rxf_sm_stopped
);
1442 bna_rxf_uninit(struct bna_rxf
*rxf
)
1444 struct bna
*bna
= rxf
->rx
->bna
;
1445 struct bna_mac
*mac
;
1447 bna_rit_mod_seg_put(&rxf
->rx
->bna
->rit_mod
, rxf
->rit_segment
);
1448 rxf
->rit_segment
= NULL
;
1450 rxf
->ucast_pending_set
= 0;
1452 while (!list_empty(&rxf
->ucast_pending_add_q
)) {
1453 bfa_q_deq(&rxf
->ucast_pending_add_q
, &mac
);
1454 bfa_q_qe_init(&mac
->qe
);
1455 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
, mac
);
1458 if (rxf
->ucast_active_mac
) {
1459 bfa_q_qe_init(&rxf
->ucast_active_mac
->qe
);
1460 bna_ucam_mod_mac_put(&rxf
->rx
->bna
->ucam_mod
,
1461 rxf
->ucast_active_mac
);
1462 rxf
->ucast_active_mac
= NULL
;
1465 while (!list_empty(&rxf
->mcast_pending_add_q
)) {
1466 bfa_q_deq(&rxf
->mcast_pending_add_q
, &mac
);
1467 bfa_q_qe_init(&mac
->qe
);
1468 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
1471 /* Turn off pending promisc mode */
1472 if (is_promisc_enable(rxf
->rxmode_pending
,
1473 rxf
->rxmode_pending_bitmask
)) {
1474 /* system promisc state should be pending */
1475 BUG_ON(!(bna
->rxf_promisc_id
== rxf
->rxf_id
));
1476 promisc_inactive(rxf
->rxmode_pending
,
1477 rxf
->rxmode_pending_bitmask
);
1478 bna
->rxf_promisc_id
= BFI_MAX_RXF
;
1480 /* Promisc mode should not be active */
1481 BUG_ON(rxf
->rxmode_active
& BNA_RXMODE_PROMISC
);
1483 /* Turn off pending all-multi mode */
1484 if (is_allmulti_enable(rxf
->rxmode_pending
,
1485 rxf
->rxmode_pending_bitmask
)) {
1486 allmulti_inactive(rxf
->rxmode_pending
,
1487 rxf
->rxmode_pending_bitmask
);
1489 /* Allmulti mode should not be active */
1490 BUG_ON(rxf
->rxmode_active
& BNA_RXMODE_ALLMULTI
);
1496 bna_rx_cb_rxf_started(struct bna_rx
*rx
, enum bna_cb_status status
)
1498 bfa_fsm_send_event(rx
, RX_E_RXF_STARTED
);
1499 if (rx
->rxf
.rxf_id
< 32)
1500 rx
->bna
->rx_mod
.rxf_bmap
[0] |= ((u32
)1 << rx
->rxf
.rxf_id
);
1502 rx
->bna
->rx_mod
.rxf_bmap
[1] |= ((u32
)
1503 1 << (rx
->rxf
.rxf_id
- 32));
1507 bna_rxf_start(struct bna_rxf
*rxf
)
1509 rxf
->start_cbfn
= bna_rx_cb_rxf_started
;
1510 rxf
->start_cbarg
= rxf
->rx
;
1511 rxf
->rxf_flags
&= ~BNA_RXF_FL_FAILED
;
1512 bfa_fsm_send_event(rxf
, RXF_E_START
);
1516 bna_rx_cb_rxf_stopped(struct bna_rx
*rx
, enum bna_cb_status status
)
1518 bfa_fsm_send_event(rx
, RX_E_RXF_STOPPED
);
1519 if (rx
->rxf
.rxf_id
< 32)
1520 rx
->bna
->rx_mod
.rxf_bmap
[0] &= ~(u32
)1 << rx
->rxf
.rxf_id
;
1522 rx
->bna
->rx_mod
.rxf_bmap
[1] &= ~(u32
)
1523 1 << (rx
->rxf
.rxf_id
- 32);
1527 bna_rxf_stop(struct bna_rxf
*rxf
)
1529 rxf
->stop_cbfn
= bna_rx_cb_rxf_stopped
;
1530 rxf
->stop_cbarg
= rxf
->rx
;
1531 bfa_fsm_send_event(rxf
, RXF_E_STOP
);
1535 bna_rxf_fail(struct bna_rxf
*rxf
)
1537 rxf
->rxf_flags
|= BNA_RXF_FL_FAILED
;
1538 bfa_fsm_send_event(rxf
, RXF_E_FAIL
);
1542 bna_rxf_state_get(struct bna_rxf
*rxf
)
1544 return bfa_sm_to_state(rxf_sm_table
, rxf
->fsm
);
1548 bna_rx_ucast_set(struct bna_rx
*rx
, u8
*ucmac
,
1549 void (*cbfn
)(struct bnad
*, struct bna_rx
*,
1550 enum bna_cb_status
))
1552 struct bna_rxf
*rxf
= &rx
->rxf
;
1554 if (rxf
->ucast_active_mac
== NULL
) {
1555 rxf
->ucast_active_mac
=
1556 bna_ucam_mod_mac_get(&rxf
->rx
->bna
->ucam_mod
);
1557 if (rxf
->ucast_active_mac
== NULL
)
1558 return BNA_CB_UCAST_CAM_FULL
;
1559 bfa_q_qe_init(&rxf
->ucast_active_mac
->qe
);
1562 memcpy(rxf
->ucast_active_mac
->addr
, ucmac
, ETH_ALEN
);
1563 rxf
->ucast_pending_set
++;
1564 rxf
->cam_fltr_cbfn
= cbfn
;
1565 rxf
->cam_fltr_cbarg
= rx
->bna
->bnad
;
1567 bfa_fsm_send_event(rxf
, RXF_E_CAM_FLTR_MOD
);
1569 return BNA_CB_SUCCESS
;
1573 bna_rx_mcast_add(struct bna_rx
*rx
, u8
*addr
,
1574 void (*cbfn
)(struct bnad
*, struct bna_rx
*,
1575 enum bna_cb_status
))
1577 struct bna_rxf
*rxf
= &rx
->rxf
;
1578 struct list_head
*qe
;
1579 struct bna_mac
*mac
;
1581 /* Check if already added */
1582 list_for_each(qe
, &rxf
->mcast_active_q
) {
1583 mac
= (struct bna_mac
*)qe
;
1584 if (BNA_MAC_IS_EQUAL(mac
->addr
, addr
)) {
1586 (*cbfn
)(rx
->bna
->bnad
, rx
, BNA_CB_SUCCESS
);
1587 return BNA_CB_SUCCESS
;
1591 /* Check if pending addition */
1592 list_for_each(qe
, &rxf
->mcast_pending_add_q
) {
1593 mac
= (struct bna_mac
*)qe
;
1594 if (BNA_MAC_IS_EQUAL(mac
->addr
, addr
)) {
1596 (*cbfn
)(rx
->bna
->bnad
, rx
, BNA_CB_SUCCESS
);
1597 return BNA_CB_SUCCESS
;
1601 mac
= bna_mcam_mod_mac_get(&rxf
->rx
->bna
->mcam_mod
);
1603 return BNA_CB_MCAST_LIST_FULL
;
1604 bfa_q_qe_init(&mac
->qe
);
1605 memcpy(mac
->addr
, addr
, ETH_ALEN
);
1606 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_add_q
);
1608 rxf
->cam_fltr_cbfn
= cbfn
;
1609 rxf
->cam_fltr_cbarg
= rx
->bna
->bnad
;
1611 bfa_fsm_send_event(rxf
, RXF_E_CAM_FLTR_MOD
);
1613 return BNA_CB_SUCCESS
;
1617 bna_rx_mcast_listset(struct bna_rx
*rx
, int count
, u8
*mclist
,
1618 void (*cbfn
)(struct bnad
*, struct bna_rx
*,
1619 enum bna_cb_status
))
1621 struct bna_rxf
*rxf
= &rx
->rxf
;
1622 struct list_head list_head
;
1623 struct list_head
*qe
;
1625 struct bna_mac
*mac
;
1626 struct bna_mac
*mac1
;
1629 int need_hw_config
= 0;
1632 /* Allocate nodes */
1633 INIT_LIST_HEAD(&list_head
);
1634 for (i
= 0, mcaddr
= mclist
; i
< count
; i
++) {
1635 mac
= bna_mcam_mod_mac_get(&rxf
->rx
->bna
->mcam_mod
);
1638 bfa_q_qe_init(&mac
->qe
);
1639 memcpy(mac
->addr
, mcaddr
, ETH_ALEN
);
1640 list_add_tail(&mac
->qe
, &list_head
);
1645 /* Schedule for addition */
1646 while (!list_empty(&list_head
)) {
1647 bfa_q_deq(&list_head
, &qe
);
1648 mac
= (struct bna_mac
*)qe
;
1649 bfa_q_qe_init(&mac
->qe
);
1653 /* Skip if already added */
1654 list_for_each(qe
, &rxf
->mcast_active_q
) {
1655 mac1
= (struct bna_mac
*)qe
;
1656 if (BNA_MAC_IS_EQUAL(mac1
->addr
, mac
->addr
)) {
1657 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
,
1667 /* Skip if pending addition */
1668 list_for_each(qe
, &rxf
->mcast_pending_add_q
) {
1669 mac1
= (struct bna_mac
*)qe
;
1670 if (BNA_MAC_IS_EQUAL(mac1
->addr
, mac
->addr
)) {
1671 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
,
1682 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_add_q
);
1686 * Delete the entries that are in the pending_add_q but not
1689 while (!list_empty(&rxf
->mcast_pending_add_q
)) {
1690 bfa_q_deq(&rxf
->mcast_pending_add_q
, &qe
);
1691 mac
= (struct bna_mac
*)qe
;
1692 bfa_q_qe_init(&mac
->qe
);
1693 for (i
= 0, mcaddr
= mclist
, delete = 1; i
< count
; i
++) {
1694 if (BNA_MAC_IS_EQUAL(mcaddr
, mac
->addr
)) {
1701 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
1703 list_add_tail(&mac
->qe
, &list_head
);
1705 while (!list_empty(&list_head
)) {
1706 bfa_q_deq(&list_head
, &qe
);
1707 mac
= (struct bna_mac
*)qe
;
1708 bfa_q_qe_init(&mac
->qe
);
1709 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_add_q
);
1713 * Schedule entries for deletion that are in the active_q but not
1716 while (!list_empty(&rxf
->mcast_active_q
)) {
1717 bfa_q_deq(&rxf
->mcast_active_q
, &qe
);
1718 mac
= (struct bna_mac
*)qe
;
1719 bfa_q_qe_init(&mac
->qe
);
1720 for (i
= 0, mcaddr
= mclist
, delete = 1; i
< count
; i
++) {
1721 if (BNA_MAC_IS_EQUAL(mcaddr
, mac
->addr
)) {
1728 list_add_tail(&mac
->qe
, &rxf
->mcast_pending_del_q
);
1731 list_add_tail(&mac
->qe
, &list_head
);
1734 while (!list_empty(&list_head
)) {
1735 bfa_q_deq(&list_head
, &qe
);
1736 mac
= (struct bna_mac
*)qe
;
1737 bfa_q_qe_init(&mac
->qe
);
1738 list_add_tail(&mac
->qe
, &rxf
->mcast_active_q
);
1741 if (need_hw_config
) {
1742 rxf
->cam_fltr_cbfn
= cbfn
;
1743 rxf
->cam_fltr_cbarg
= rx
->bna
->bnad
;
1744 bfa_fsm_send_event(rxf
, RXF_E_CAM_FLTR_MOD
);
1746 (*cbfn
)(rx
->bna
->bnad
, rx
, BNA_CB_SUCCESS
);
1748 return BNA_CB_SUCCESS
;
1751 while (!list_empty(&list_head
)) {
1752 bfa_q_deq(&list_head
, &qe
);
1753 mac
= (struct bna_mac
*)qe
;
1754 bfa_q_qe_init(&mac
->qe
);
1755 bna_mcam_mod_mac_put(&rxf
->rx
->bna
->mcam_mod
, mac
);
1758 return BNA_CB_MCAST_LIST_FULL
;
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> 5);
	int bit = (1 << (vlan_id & 0x1F));

	rxf->vlan_filter_table[index] |= bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}

void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
	struct bna_rxf *rxf = &rx->rxf;
	int index = (vlan_id >> 5);
	int bit = (1 << (vlan_id & 0x1F));

	rxf->vlan_filter_table[index] &= ~bit;
	if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
		rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
		bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
	}
}
#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
	struct bna_doorbell_qset *_qset; \
	unsigned long off; \
	(q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
	(q)->rcb->q_depth = (qdepth); \
	(q)->rcb->unmap_q = unmapq_mem; \
	(q)->rcb->rxq = (q); \
	(q)->rcb->cq = &(rxp)->cq; \
	(q)->rcb->bnad = (bna)->bnad; \
	_qset = (struct bna_doorbell_qset *)0; \
	off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
	(q)->rcb->q_dbell = off + \
		BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
	(q)->rcb->id = _id; \
} while (0)

#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
	(qcfg)->num_paths : ((qcfg)->num_paths * 2))

#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
	(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))

#define call_rx_stop_callback(rx, status) \
	if ((rx)->stop_cbfn) { \
		(*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
		(rx)->stop_cbfn = NULL; \
		(rx)->stop_cbarg = NULL; \
	}

/*
 * Since rx_enable is synchronous callback, there is no start_cbfn required.
 * Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
 */

#define call_rx_disable_cbfn(rx, status) \
	if ((rx)->disable_cbfn) { \
		(*(rx)->disable_cbfn)((rx)->disable_cbarg, \
				(status)); \
		(rx)->disable_cbfn = NULL; \
		(rx)->disable_cbarg = NULL; \
	}

#define rxqs_reqd(type, num_rxqs) \
	(((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))

#define rx_ib_fail(rx) \
do { \
	struct bna_rxp *rxp; \
	struct list_head *qe; \
	list_for_each(qe, &(rx)->rxp_q) { \
		rxp = (struct bna_rxp *)qe; \
		bna_ib_fail(rxp->cq.ib); \
	} \
} while (0)
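
/*
 * rx_ib_fail() is used on the RX failure path to mark every interrupt block
 * attached to this RX's completion queues as failed; a minimal usage sketch
 * is rx_ib_fail(rx) from the RX failure handling code, assuming rx->rxp_q
 * has been populated with the RX paths.
 */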
static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
static void __bna_rxq_start(struct bna_rxq *rxq);
static void __bna_cq_start(struct bna_cq *cq);
static void bna_rit_create(struct bna_rx *rx);
static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
static void bna_rx_cb_rxq_stopped_all(void *arg);

bfa_fsm_state_decl(bna_rx, stopped,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
	struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
	struct bna_rx, enum bna_rx_event);

static const struct bfa_sm_table rx_sm_table[] = {
	{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
	{BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
	{BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
	{BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
	{BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
};
1874 static void bna_rx_sm_stopped_entry(struct bna_rx
*rx
)
1876 struct bna_rxp
*rxp
;
1877 struct list_head
*qe_rxp
;
1879 list_for_each(qe_rxp
, &rx
->rxp_q
) {
1880 rxp
= (struct bna_rxp
*)qe_rxp
;
1881 rx
->rx_cleanup_cbfn(rx
->bna
->bnad
, rxp
->cq
.ccb
);
1884 call_rx_stop_callback(rx
, BNA_CB_SUCCESS
);
1887 static void bna_rx_sm_stopped(struct bna_rx
*rx
,
1888 enum bna_rx_event event
)
1892 bfa_fsm_set_state(rx
, bna_rx_sm_rxf_start_wait
);
1895 call_rx_stop_callback(rx
, BNA_CB_SUCCESS
);
1901 bfa_sm_fault(rx
->bna
, event
);
1907 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx
*rx
)
1909 struct bna_rxp
*rxp
;
1910 struct list_head
*qe_rxp
;
1911 struct bna_rxq
*q0
= NULL
, *q1
= NULL
;
1916 list_for_each(qe_rxp
, &rx
->rxp_q
) {
1917 rxp
= (struct bna_rxp
*)qe_rxp
;
1918 bna_ib_start(rxp
->cq
.ib
);
1919 GET_RXQS(rxp
, q0
, q1
);
1920 q0
->buffer_size
= bna_port_mtu_get(&rx
->bna
->port
);
1921 __bna_rxq_start(q0
);
1922 rx
->rx_post_cbfn(rx
->bna
->bnad
, q0
->rcb
);
1924 __bna_rxq_start(q1
);
1925 rx
->rx_post_cbfn(rx
->bna
->bnad
, q1
->rcb
);
1927 __bna_cq_start(&rxp
->cq
);
1930 bna_rxf_start(&rx
->rxf
);
1933 static void bna_rx_sm_rxf_start_wait(struct bna_rx
*rx
,
1934 enum bna_rx_event event
)
1938 bfa_fsm_set_state(rx
, bna_rx_sm_rxf_stop_wait
);
1941 bfa_fsm_set_state(rx
, bna_rx_sm_stopped
);
1943 bna_rxf_fail(&rx
->rxf
);
1945 case RX_E_RXF_STARTED
:
1946 bfa_fsm_set_state(rx
, bna_rx_sm_started
);
1949 bfa_sm_fault(rx
->bna
, event
);
1955 bna_rx_sm_started_entry(struct bna_rx
*rx
)
1957 struct bna_rxp
*rxp
;
1958 struct list_head
*qe_rxp
;
1961 list_for_each(qe_rxp
, &rx
->rxp_q
) {
1962 rxp
= (struct bna_rxp
*)qe_rxp
;
1963 bna_ib_ack(&rxp
->cq
.ib
->door_bell
, 0);
1966 bna_llport_rx_started(&rx
->bna
->port
.llport
);
1970 bna_rx_sm_started(struct bna_rx
*rx
, enum bna_rx_event event
)
1974 bna_llport_rx_stopped(&rx
->bna
->port
.llport
);
1975 bfa_fsm_set_state(rx
, bna_rx_sm_stopped
);
1977 bna_rxf_fail(&rx
->rxf
);
1980 bna_llport_rx_stopped(&rx
->bna
->port
.llport
);
1981 bfa_fsm_set_state(rx
, bna_rx_sm_rxf_stop_wait
);
1984 bfa_sm_fault(rx
->bna
, event
);
1990 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx
*rx
)
1992 bna_rxf_stop(&rx
->rxf
);
1996 bna_rx_sm_rxf_stop_wait(struct bna_rx
*rx
, enum bna_rx_event event
)
1999 case RX_E_RXF_STOPPED
:
2000 bfa_fsm_set_state(rx
, bna_rx_sm_rxq_stop_wait
);
2002 case RX_E_RXF_STARTED
:
2004 * RxF was in the process of starting up when
2005 * RXF_E_STOP was issued. Ignore this event
2009 bfa_fsm_set_state(rx
, bna_rx_sm_stopped
);
2011 bna_rxf_fail(&rx
->rxf
);
2014 bfa_sm_fault(rx
->bna
, event
);
2021 bna_rx_sm_rxq_stop_wait_entry(struct bna_rx
*rx
)
2023 struct bna_rxp
*rxp
= NULL
;
2024 struct bna_rxq
*q0
= NULL
;
2025 struct bna_rxq
*q1
= NULL
;
2026 struct list_head
*qe
;
2027 u32 rxq_mask
[2] = {0, 0};
2029 /* Only one call to multi-rxq-stop for all RXPs in this RX */
2030 bfa_wc_up(&rx
->rxq_stop_wc
);
2031 list_for_each(qe
, &rx
->rxp_q
) {
2032 rxp
= (struct bna_rxp
*)qe
;
2033 GET_RXQS(rxp
, q0
, q1
);
2034 if (q0
->rxq_id
< 32)
2035 rxq_mask
[0] |= ((u32
)1 << q0
->rxq_id
);
2037 rxq_mask
[1] |= ((u32
)1 << (q0
->rxq_id
- 32));
2039 if (q1
->rxq_id
< 32)
2040 rxq_mask
[0] |= ((u32
)1 << q1
->rxq_id
);
2042 rxq_mask
[1] |= ((u32
)
2043 1 << (q1
->rxq_id
- 32));
2047 __bna_multi_rxq_stop(rxp
, rxq_mask
);
2051 bna_rx_sm_rxq_stop_wait(struct bna_rx
*rx
, enum bna_rx_event event
)
2053 struct bna_rxp
*rxp
= NULL
;
2054 struct list_head
*qe
;
2057 case RX_E_RXQ_STOPPED
:
2058 list_for_each(qe
, &rx
->rxp_q
) {
2059 rxp
= (struct bna_rxp
*)qe
;
2060 bna_ib_stop(rxp
->cq
.ib
);
2064 bfa_fsm_set_state(rx
, bna_rx_sm_stopped
);
2067 bfa_sm_fault(rx
->bna
, event
);
2073 __bna_multi_rxq_stop(struct bna_rxp
*rxp
, u32
* rxq_id_mask
)
2075 struct bfi_ll_q_stop_req ll_req
;
2077 bfi_h2i_set(ll_req
.mh
, BFI_MC_LL
, BFI_LL_H2I_RXQ_STOP_REQ
, 0);
2078 ll_req
.q_id_mask
[0] = htonl(rxq_id_mask
[0]);
2079 ll_req
.q_id_mask
[1] = htonl(rxq_id_mask
[1]);
2080 bna_mbox_qe_fill(&rxp
->mbox_qe
, &ll_req
, sizeof(ll_req
),
2081 bna_rx_cb_multi_rxq_stopped
, rxp
);
2082 bna_mbox_send(rxp
->rx
->bna
, &rxp
->mbox_qe
);
2086 __bna_rxq_start(struct bna_rxq
*rxq
)
2088 struct bna_rxtx_q_mem
*q_mem
;
2089 struct bna_rxq_mem rxq_cfg
, *rxq_mem
;
2090 struct bna_dma_addr cur_q_addr
;
2091 /* struct bna_doorbell_qset *qset; */
2092 struct bna_qpt
*qpt
;
2094 struct bna
*bna
= rxq
->rx
->bna
;
2095 void __iomem
*base_addr
;
2099 cur_q_addr
= *((struct bna_dma_addr
*)(qpt
->kv_qpt_ptr
));
2101 rxq_cfg
.pg_tbl_addr_lo
= qpt
->hw_qpt_ptr
.lsb
;
2102 rxq_cfg
.pg_tbl_addr_hi
= qpt
->hw_qpt_ptr
.msb
;
2103 rxq_cfg
.cur_q_entry_lo
= cur_q_addr
.lsb
;
2104 rxq_cfg
.cur_q_entry_hi
= cur_q_addr
.msb
;
2106 rxq_cfg
.pg_cnt_n_prd_ptr
= ((u32
)qpt
->page_count
<< 16) | 0x0;
2107 rxq_cfg
.entry_n_pg_size
= ((u32
)(BFI_RXQ_WI_SIZE
>> 2) << 16) |
2108 (qpt
->page_size
>> 2);
2109 rxq_cfg
.sg_n_cq_n_cns_ptr
=
2110 ((u32
)(rxq
->rxp
->cq
.cq_id
& 0xff) << 16) | 0x0;
2111 rxq_cfg
.buf_sz_n_q_state
= ((u32
)rxq
->buffer_size
<< 16) |
2113 rxq_cfg
.next_qid
= 0x0 | (0x3 << 8);
2115 /* Write the page number register */
2116 pg_num
= BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM
+ bna
->port_num
,
2117 HQM_RXTX_Q_RAM_BASE_OFFSET
);
2118 writel(pg_num
, bna
->regs
.page_addr
);
2121 base_addr
= BNA_GET_MEM_BASE_ADDR(bna
->pcidev
.pci_bar_kva
,
2122 HQM_RXTX_Q_RAM_BASE_OFFSET
);
2124 q_mem
= (struct bna_rxtx_q_mem
*)0;
2125 rxq_mem
= &q_mem
[rxq
->rxq_id
].rxq
;
2127 off
= (unsigned long)&rxq_mem
->pg_tbl_addr_lo
;
2128 writel(htonl(rxq_cfg
.pg_tbl_addr_lo
), base_addr
+ off
);
2130 off
= (unsigned long)&rxq_mem
->pg_tbl_addr_hi
;
2131 writel(htonl(rxq_cfg
.pg_tbl_addr_hi
), base_addr
+ off
);
2133 off
= (unsigned long)&rxq_mem
->cur_q_entry_lo
;
2134 writel(htonl(rxq_cfg
.cur_q_entry_lo
), base_addr
+ off
);
2136 off
= (unsigned long)&rxq_mem
->cur_q_entry_hi
;
2137 writel(htonl(rxq_cfg
.cur_q_entry_hi
), base_addr
+ off
);
2139 off
= (unsigned long)&rxq_mem
->pg_cnt_n_prd_ptr
;
2140 writel(rxq_cfg
.pg_cnt_n_prd_ptr
, base_addr
+ off
);
2142 off
= (unsigned long)&rxq_mem
->entry_n_pg_size
;
2143 writel(rxq_cfg
.entry_n_pg_size
, base_addr
+ off
);
2145 off
= (unsigned long)&rxq_mem
->sg_n_cq_n_cns_ptr
;
2146 writel(rxq_cfg
.sg_n_cq_n_cns_ptr
, base_addr
+ off
);
2148 off
= (unsigned long)&rxq_mem
->buf_sz_n_q_state
;
2149 writel(rxq_cfg
.buf_sz_n_q_state
, base_addr
+ off
);
2151 off
= (unsigned long)&rxq_mem
->next_qid
;
2152 writel(rxq_cfg
.next_qid
, base_addr
+ off
);
2154 rxq
->rcb
->producer_index
= 0;
2155 rxq
->rcb
->consumer_index
= 0;
2159 __bna_cq_start(struct bna_cq
*cq
)
2161 struct bna_cq_mem cq_cfg
, *cq_mem
;
2162 const struct bna_qpt
*qpt
;
2163 struct bna_dma_addr cur_q_addr
;
2165 struct bna
*bna
= cq
->rx
->bna
;
2166 void __iomem
*base_addr
;
2170 cur_q_addr
= *((struct bna_dma_addr
*)(qpt
->kv_qpt_ptr
));
2173 * Fill out structure, to be subsequently written
2176 cq_cfg
.pg_tbl_addr_lo
= qpt
->hw_qpt_ptr
.lsb
;
2177 cq_cfg
.pg_tbl_addr_hi
= qpt
->hw_qpt_ptr
.msb
;
2178 cq_cfg
.cur_q_entry_lo
= cur_q_addr
.lsb
;
2179 cq_cfg
.cur_q_entry_hi
= cur_q_addr
.msb
;
2181 cq_cfg
.pg_cnt_n_prd_ptr
= (qpt
->page_count
<< 16) | 0x0;
2182 cq_cfg
.entry_n_pg_size
=
2183 ((u32
)(BFI_CQ_WI_SIZE
>> 2) << 16) | (qpt
->page_size
>> 2);
2184 cq_cfg
.int_blk_n_cns_ptr
= ((((u32
)cq
->ib_seg_offset
) << 24) |
2185 ((u32
)(cq
->ib
->ib_id
& 0xff) << 16) | 0x0);
2186 cq_cfg
.q_state
= BNA_Q_IDLE_STATE
;
2188 /* Write the page number register */
2189 pg_num
= BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM
+ bna
->port_num
,
2190 HQM_CQ_RAM_BASE_OFFSET
);
2192 writel(pg_num
, bna
->regs
.page_addr
);
2195 base_addr
= BNA_GET_MEM_BASE_ADDR(bna
->pcidev
.pci_bar_kva
,
2196 HQM_CQ_RAM_BASE_OFFSET
);
2198 cq_mem
= (struct bna_cq_mem
*)0;
2200 off
= (unsigned long)&cq_mem
[cq
->cq_id
].pg_tbl_addr_lo
;
2201 writel(htonl(cq_cfg
.pg_tbl_addr_lo
), base_addr
+ off
);
2203 off
= (unsigned long)&cq_mem
[cq
->cq_id
].pg_tbl_addr_hi
;
2204 writel(htonl(cq_cfg
.pg_tbl_addr_hi
), base_addr
+ off
);
2206 off
= (unsigned long)&cq_mem
[cq
->cq_id
].cur_q_entry_lo
;
2207 writel(htonl(cq_cfg
.cur_q_entry_lo
), base_addr
+ off
);
2209 off
= (unsigned long)&cq_mem
[cq
->cq_id
].cur_q_entry_hi
;
2210 writel(htonl(cq_cfg
.cur_q_entry_hi
), base_addr
+ off
);
2212 off
= (unsigned long)&cq_mem
[cq
->cq_id
].pg_cnt_n_prd_ptr
;
2213 writel(cq_cfg
.pg_cnt_n_prd_ptr
, base_addr
+ off
);
2215 off
= (unsigned long)&cq_mem
[cq
->cq_id
].entry_n_pg_size
;
2216 writel(cq_cfg
.entry_n_pg_size
, base_addr
+ off
);
2218 off
= (unsigned long)&cq_mem
[cq
->cq_id
].int_blk_n_cns_ptr
;
2219 writel(cq_cfg
.int_blk_n_cns_ptr
, base_addr
+ off
);
2221 off
= (unsigned long)&cq_mem
[cq
->cq_id
].q_state
;
2222 writel(cq_cfg
.q_state
, base_addr
+ off
);
2224 cq
->ccb
->producer_index
= 0;
2225 *(cq
->ccb
->hw_producer_index
) = 0;
2229 bna_rit_create(struct bna_rx
*rx
)
2231 struct list_head
*qe_rxp
;
2232 struct bna_rxp
*rxp
;
2233 struct bna_rxq
*q0
= NULL
;
2234 struct bna_rxq
*q1
= NULL
;
2238 list_for_each(qe_rxp
, &rx
->rxp_q
) {
2239 rxp
= (struct bna_rxp
*)qe_rxp
;
2240 GET_RXQS(rxp
, q0
, q1
);
2241 rx
->rxf
.rit_segment
->rit
[offset
].large_rxq_id
= q0
->rxq_id
;
2242 rx
->rxf
.rit_segment
->rit
[offset
].small_rxq_id
=
2243 (q1
? q1
->rxq_id
: 0);
2249 _rx_can_satisfy(struct bna_rx_mod
*rx_mod
,
2250 struct bna_rx_config
*rx_cfg
)
2252 if ((rx_mod
->rx_free_count
== 0) ||
2253 (rx_mod
->rxp_free_count
== 0) ||
2254 (rx_mod
->rxq_free_count
== 0))
2257 if (rx_cfg
->rxp_type
== BNA_RXP_SINGLE
) {
2258 if ((rx_mod
->rxp_free_count
< rx_cfg
->num_paths
) ||
2259 (rx_mod
->rxq_free_count
< rx_cfg
->num_paths
))
2262 if ((rx_mod
->rxp_free_count
< rx_cfg
->num_paths
) ||
2263 (rx_mod
->rxq_free_count
< (2 * rx_cfg
->num_paths
)))
2267 if (!bna_rit_mod_can_satisfy(&rx_mod
->bna
->rit_mod
, rx_cfg
->num_paths
))
2273 static struct bna_rxq
*
2274 _get_free_rxq(struct bna_rx_mod
*rx_mod
)
2276 struct bna_rxq
*rxq
= NULL
;
2277 struct list_head
*qe
= NULL
;
2279 bfa_q_deq(&rx_mod
->rxq_free_q
, &qe
);
2281 rx_mod
->rxq_free_count
--;
2282 rxq
= (struct bna_rxq
*)qe
;
2288 _put_free_rxq(struct bna_rx_mod
*rx_mod
, struct bna_rxq
*rxq
)
2290 bfa_q_qe_init(&rxq
->qe
);
2291 list_add_tail(&rxq
->qe
, &rx_mod
->rxq_free_q
);
2292 rx_mod
->rxq_free_count
++;
2295 static struct bna_rxp
*
2296 _get_free_rxp(struct bna_rx_mod
*rx_mod
)
2298 struct list_head
*qe
= NULL
;
2299 struct bna_rxp
*rxp
= NULL
;
2301 bfa_q_deq(&rx_mod
->rxp_free_q
, &qe
);
2303 rx_mod
->rxp_free_count
--;
2305 rxp
= (struct bna_rxp
*)qe
;
2312 _put_free_rxp(struct bna_rx_mod
*rx_mod
, struct bna_rxp
*rxp
)
2314 bfa_q_qe_init(&rxp
->qe
);
2315 list_add_tail(&rxp
->qe
, &rx_mod
->rxp_free_q
);
2316 rx_mod
->rxp_free_count
++;
2319 static struct bna_rx
*
2320 _get_free_rx(struct bna_rx_mod
*rx_mod
)
2322 struct list_head
*qe
= NULL
;
2323 struct bna_rx
*rx
= NULL
;
2325 bfa_q_deq(&rx_mod
->rx_free_q
, &qe
);
2327 rx_mod
->rx_free_count
--;
2329 rx
= (struct bna_rx
*)qe
;
2331 list_add_tail(&rx
->qe
, &rx_mod
->rx_active_q
);
2338 _put_free_rx(struct bna_rx_mod
*rx_mod
, struct bna_rx
*rx
)
2340 bfa_q_qe_init(&rx
->qe
);
2341 list_add_tail(&rx
->qe
, &rx_mod
->rx_free_q
);
2342 rx_mod
->rx_free_count
++;
2346 _rx_init(struct bna_rx
*rx
, struct bna
*bna
)
2351 INIT_LIST_HEAD(&rx
->rxp_q
);
2353 rx
->rxq_stop_wc
.wc_resume
= bna_rx_cb_rxq_stopped_all
;
2354 rx
->rxq_stop_wc
.wc_cbarg
= rx
;
2355 rx
->rxq_stop_wc
.wc_count
= 0;
2357 rx
->stop_cbfn
= NULL
;
2358 rx
->stop_cbarg
= NULL
;
2362 _rxp_add_rxqs(struct bna_rxp
*rxp
,
2366 switch (rxp
->type
) {
2367 case BNA_RXP_SINGLE
:
2368 rxp
->rxq
.single
.only
= q0
;
2369 rxp
->rxq
.single
.reserved
= NULL
;
2372 rxp
->rxq
.slr
.large
= q0
;
2373 rxp
->rxq
.slr
.small
= q1
;
2376 rxp
->rxq
.hds
.data
= q0
;
2377 rxp
->rxq
.hds
.hdr
= q1
;
2385 _rxq_qpt_init(struct bna_rxq
*rxq
,
2386 struct bna_rxp
*rxp
,
2389 struct bna_mem_descr
*qpt_mem
,
2390 struct bna_mem_descr
*swqpt_mem
,
2391 struct bna_mem_descr
*page_mem
)
2395 rxq
->qpt
.hw_qpt_ptr
.lsb
= qpt_mem
->dma
.lsb
;
2396 rxq
->qpt
.hw_qpt_ptr
.msb
= qpt_mem
->dma
.msb
;
2397 rxq
->qpt
.kv_qpt_ptr
= qpt_mem
->kva
;
2398 rxq
->qpt
.page_count
= page_count
;
2399 rxq
->qpt
.page_size
= page_size
;
2401 rxq
->rcb
->sw_qpt
= (void **) swqpt_mem
->kva
;
2403 for (i
= 0; i
< rxq
->qpt
.page_count
; i
++) {
2404 rxq
->rcb
->sw_qpt
[i
] = page_mem
[i
].kva
;
2405 ((struct bna_dma_addr
*)rxq
->qpt
.kv_qpt_ptr
)[i
].lsb
=
2406 page_mem
[i
].dma
.lsb
;
2407 ((struct bna_dma_addr
*)rxq
->qpt
.kv_qpt_ptr
)[i
].msb
=
2408 page_mem
[i
].dma
.msb
;
2414	_rxp_cqpt_setup(struct bna_rxp *rxp,
2417			struct bna_mem_descr *qpt_mem,
2418			struct bna_mem_descr *swqpt_mem,
2419			struct bna_mem_descr *page_mem)
2423		rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2424		rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2425		rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2426		rxp->cq.qpt.page_count = page_count;
2427		rxp->cq.qpt.page_size = page_size;
2429		rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2431		for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2432			rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
2434			((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2435				page_mem[i].dma.lsb;
2436			((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2437				page_mem[i].dma.msb;
2443	_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
2445		list_add_tail(&rxp->qe, &rx->rxp_q);
2449	_init_rxmod_queues(struct bna_rx_mod *rx_mod)
2451		INIT_LIST_HEAD(&rx_mod->rx_free_q);
2452		INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2453		INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2454		INIT_LIST_HEAD(&rx_mod->rx_active_q);
2456		rx_mod->rx_free_count = 0;
2457		rx_mod->rxq_free_count = 0;
2458		rx_mod->rxp_free_count = 0;
2462	_rx_ctor(struct bna_rx *rx, int id)
2464		bfa_q_qe_init(&rx->qe);
2465		INIT_LIST_HEAD(&rx->rxp_q);
2468		rx->rxf.rxf_id = id;
2470		/* FIXME: mbox_qe ctor()?? */
2471		bfa_q_qe_init(&rx->mbox_qe.qe);
2473		rx->stop_cbfn = NULL;
2474		rx->stop_cbarg = NULL;
2478	bna_rx_cb_multi_rxq_stopped(void *arg, int status)
2480		struct bna_rxp *rxp = (struct bna_rxp *)arg;
2482		bfa_wc_down(&rxp->rx->rxq_stop_wc);
2486	bna_rx_cb_rxq_stopped_all(void *arg)
2488		struct bna_rx *rx = (struct bna_rx *)arg;
2490		bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
2494	bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
2495			enum bna_cb_status status)
2497		struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2499		bfa_wc_down(&rx_mod->rx_stop_wc);
2503	bna_rx_mod_cb_rx_stopped_all(void *arg)
2505		struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2507		if (rx_mod->stop_cbfn)
2508			rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2509		rx_mod->stop_cbfn = NULL;
2513	bna_rx_start(struct bna_rx *rx)
2515		rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2516		if (rx->rx_flags & BNA_RX_F_ENABLE)
2517			bfa_fsm_send_event(rx, RX_E_START);
2521	bna_rx_stop(struct bna_rx *rx)
2523		rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2524		if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2525			bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
2527		rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2528		rx->stop_cbarg = &rx->bna->rx_mod;
2529		bfa_fsm_send_event(rx, RX_E_STOP);
2534	bna_rx_fail(struct bna_rx *rx)
2536		/* Indicate port is not enabled, and failed */
2537		rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
2538		rx->rx_flags |= BNA_RX_F_PORT_FAILED;
2539		bfa_fsm_send_event(rx, RX_E_FAIL);
2543	bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2546		struct list_head *qe;
2548		rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
2549		if (type == BNA_RX_T_LOOPBACK)
2550			rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
2552		list_for_each(qe, &rx_mod->rx_active_q) {
2553			rx = (struct bna_rx *)qe;
2554			if (rx->type == type)
2560	bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2563		struct list_head *qe;
2565		rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2566		rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2568		rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
2571		 * Before calling bna_rx_stop(), increment rx_stop_wc as many times
2572		 * as we are going to call bna_rx_stop
2574		list_for_each(qe, &rx_mod->rx_active_q) {
2575			rx = (struct bna_rx *)qe;
2576			if (rx->type == type)
2577				bfa_wc_up(&rx_mod->rx_stop_wc);
2580		if (rx_mod->rx_stop_wc.wc_count == 0) {
2581			rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
2582			rx_mod->stop_cbfn = NULL;
2586		list_for_each(qe, &rx_mod->rx_active_q) {
2587			rx = (struct bna_rx *)qe;
2588			if (rx->type == type)
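/*
 * Editorial sketch (not part of the driver): the rx_stop_wc handling above is
 * a wait-counter fan-out.  The counter is bumped once per Rx that will be
 * stopped, each completion callback decrements it, and the resume function
 * (bna_rx_mod_cb_rx_stopped_all) fires when it reaches zero; if nothing
 * matched the type, completion is reported immediately.  A minimal
 * single-threaded model with hypothetical names:
 */
struct demo_wc {
	int count;
	void (*resume)(void *cbarg);
	void *cbarg;
};

static void demo_wc_up(struct demo_wc *wc)
{
	wc->count++;
}

static void demo_wc_down(struct demo_wc *wc)
{
	/* last outstanding completion resumes the waiter */
	if (--wc->count == 0)
		wc->resume(wc->cbarg);
}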
2594	bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2597		struct list_head *qe;
2599		rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
2600		rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
2602		list_for_each(qe, &rx_mod->rx_active_q) {
2603			rx = (struct bna_rx *)qe;
2608	void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2609			struct bna_res_info *res_info)
2612		struct bna_rx *rx_ptr;
2613		struct bna_rxp *rxp_ptr;
2614		struct bna_rxq *rxq_ptr;
2619		rx_mod->rx = (struct bna_rx *)
2620			res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2621		rx_mod->rxp = (struct bna_rxp *)
2622			res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2623		rx_mod->rxq = (struct bna_rxq *)
2624			res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2626		/* Initialize the queues */
2627		_init_rxmod_queues(rx_mod);
2629		/* Build RX queues */
2630		for (index = 0; index < BFI_MAX_RXQ; index++) {
2631			rx_ptr = &rx_mod->rx[index];
2632			_rx_ctor(rx_ptr, index);
2633			list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2634			rx_mod->rx_free_count++;
2637		/* build RX-path queue */
2638		for (index = 0; index < BFI_MAX_RXQ; index++) {
2639			rxp_ptr = &rx_mod->rxp[index];
2640			rxp_ptr->cq.cq_id = index;
2641			bfa_q_qe_init(&rxp_ptr->qe);
2642			list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2643			rx_mod->rxp_free_count++;
2646		/* build RXQ queue */
2647		for (index = 0; index < BFI_MAX_RXQ; index++) {
2648			rxq_ptr = &rx_mod->rxq[index];
2649			rxq_ptr->rxq_id = index;
2651			bfa_q_qe_init(&rxq_ptr->qe);
2652			list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2653			rx_mod->rxq_free_count++;
2656		rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
2657		rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
2658		rx_mod->rx_stop_wc.wc_count = 0;
2662	bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2664		struct list_head *qe;
2668		list_for_each(qe, &rx_mod->rx_free_q)
2672		list_for_each(qe, &rx_mod->rxp_free_q)
2676		list_for_each(qe, &rx_mod->rxq_free_q)
2683	bna_rx_state_get(struct bna_rx *rx)
2685		return bfa_sm_to_state(rx_sm_table, rx->fsm);
2689	bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2691		u32 cq_size, hq_size, dq_size;
2692		u32 cpage_count, hpage_count, dpage_count;
2693		struct bna_mem_info *mem_info;
2698		dq_depth = q_cfg->q_depth;
2699		hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
2700		cq_depth = dq_depth + hq_depth;
2702		BNA_TO_POWER_OF_2_HIGH(cq_depth);
2703		cq_size = cq_depth * BFI_CQ_WI_SIZE;
2704		cq_size = ALIGN(cq_size, PAGE_SIZE);
2705		cpage_count = SIZE_TO_PAGES(cq_size);
2707		BNA_TO_POWER_OF_2_HIGH(dq_depth);
2708		dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2709		dq_size = ALIGN(dq_size, PAGE_SIZE);
2710		dpage_count = SIZE_TO_PAGES(dq_size);
2712		if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2713			BNA_TO_POWER_OF_2_HIGH(hq_depth);
2714			hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2715			hq_size = ALIGN(hq_size, PAGE_SIZE);
2716			hpage_count = SIZE_TO_PAGES(hq_size);
2721		/* CCB structures */
2722		res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2723		mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2724		mem_info->mem_type = BNA_MEM_T_KVA;
2725		mem_info->len = sizeof(struct bna_ccb);
2726		mem_info->num = q_cfg->num_paths;
2728		/* RCB structures */
2729		res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2730		mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2731		mem_info->mem_type = BNA_MEM_T_KVA;
2732		mem_info->len = sizeof(struct bna_rcb);
2733		mem_info->num = BNA_GET_RXQS(q_cfg);
2735		/* Completion QPT */
2736		res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2737		mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2738		mem_info->mem_type = BNA_MEM_T_DMA;
2739		mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2740		mem_info->num = q_cfg->num_paths;
2742		/* Completion s/w QPT */
2743		res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2744		mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2745		mem_info->mem_type = BNA_MEM_T_KVA;
2746		mem_info->len = cpage_count * sizeof(void *);
2747		mem_info->num = q_cfg->num_paths;
2749		/* Completion QPT pages */
2750		res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2751		mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2752		mem_info->mem_type = BNA_MEM_T_DMA;
2753		mem_info->len = PAGE_SIZE;
2754		mem_info->num = cpage_count * q_cfg->num_paths;
2757		res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2758		mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2759		mem_info->mem_type = BNA_MEM_T_DMA;
2760		mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2761		mem_info->num = q_cfg->num_paths;
2764		res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2765		mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2766		mem_info->mem_type = BNA_MEM_T_KVA;
2767		mem_info->len = dpage_count * sizeof(void *);
2768		mem_info->num = q_cfg->num_paths;
2770		/* Data QPT pages */
2771		res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2772		mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2773		mem_info->mem_type = BNA_MEM_T_DMA;
2774		mem_info->len = PAGE_SIZE;
2775		mem_info->num = dpage_count * q_cfg->num_paths;
2778		res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2779		mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2780		mem_info->mem_type = BNA_MEM_T_DMA;
2781		mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2782		mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2785		res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2786		mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2787		mem_info->mem_type = BNA_MEM_T_KVA;
2788		mem_info->len = hpage_count * sizeof(void *);
2789		mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2792		res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2793		mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2794		mem_info->mem_type = BNA_MEM_T_DMA;
2795		mem_info->len = (hpage_count ? PAGE_SIZE : 0);
2796		mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
2799		res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2800		res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2801		res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths
2805	bna_rx_create(struct bna *bna, struct bnad *bnad,
2806			struct bna_rx_config *rx_cfg,
2807			struct bna_rx_event_cbfn *rx_cbfn,
2808			struct bna_res_info *res_info,
2811		struct bna_rx_mod *rx_mod = &bna->rx_mod;
2813		struct bna_rxp *rxp;
2816		struct bna_intr_info *intr_info;
2818		struct bna_mem_descr *ccb_mem;
2819		struct bna_mem_descr *rcb_mem;
2820		struct bna_mem_descr *unmapq_mem;
2821		struct bna_mem_descr *cqpt_mem;
2822		struct bna_mem_descr *cswqpt_mem;
2823		struct bna_mem_descr *cpage_mem;
2824		struct bna_mem_descr *hqpt_mem;		/* Header/Small Q qpt */
2825		struct bna_mem_descr *dqpt_mem;		/* Data/Large Q qpt */
2826		struct bna_mem_descr *hsqpt_mem;	/* s/w qpt for hdr */
2827		struct bna_mem_descr *dsqpt_mem;	/* s/w qpt for data */
2828		struct bna_mem_descr *hpage_mem;	/* hdr page mem */
2829		struct bna_mem_descr *dpage_mem;	/* data page mem */
2830		int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
2831		int dpage_count, hpage_count, rcb_idx;
2832		struct bna_ib_config ibcfg;
2833		/* Fail if we don't have enough RXPs, RXQs */
2834		if (!_rx_can_satisfy(rx_mod, rx_cfg))
2837		/* Initialize resource pointers */
2838		intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2839		ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2840		rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2841		unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
2842		cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2843		cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2844		cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2845		hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2846		dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2847		hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2848		dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2849		hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2850		dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2852		/* Compute q depth & page count */
2853		page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
2856		dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
2859		hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
2861		/* Get RX pointer */
2862		rx = _get_free_rx(rx_mod);
2865		rx->type = rx_cfg->rx_type;
2867		rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2868		rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2869		rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2870		rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2871		/* Following callbacks are mandatory */
2872		rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2873		rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2875		if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
2877			case BNA_RX_T_REGULAR:
2878				if (!(rx->bna->rx_mod.flags &
2879					BNA_RX_MOD_F_PORT_LOOPBACK))
2880					rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2882			case BNA_RX_T_LOOPBACK:
2883				if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
2884					rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
2889		for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
2890			rxp = _get_free_rxp(rx_mod);
2891			rxp->type = rx_cfg->rxp_type;
2895			/* Get required RXQs, and queue them to rx-path */
2896			q0 = _get_free_rxq(rx_mod);
2897			if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2900				q1 = _get_free_rxq(rx_mod);
2903			if (1 == intr_info->num) {
2904				rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2905						intr_info->intr_type,
2906						intr_info->idl[0].vector);
2907				rxp->vector = intr_info->idl[0].vector;
2909				rxp->cq.ib = bna_ib_get(&bna->ib_mod,
2910						intr_info->intr_type,
2911						intr_info->idl[i].vector);
2913				/* Map the MSI-x vector used for this RXP */
2914				rxp->vector = intr_info->idl[i].vector;
2917			rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
2919			ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
2920			ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
2921			ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2922			ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
2924			bna_ib_config(rxp->cq.ib, &ibcfg);
2926			/* Link rxqs to rxp */
2927			_rxp_add_rxqs(rxp, q0, q1);
2929			/* Link rxp to rx */
2930			_rx_add_rxp(rx, rxp);
2935			/* Initialize RCB for the large / data q */
2936			q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2937			RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
2938				(void *)unmapq_mem[rcb_idx].kva);
2940			(q0)->rx_packets = (q0)->rx_bytes = 0;
2941			(q0)->rx_packets_with_error = (q0)->rxbuf_alloc_failed = 0;
2943			/* Initialize RXQs */
2944			_rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
2945				&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
2946			q0->rcb->page_idx = dpage_idx;
2947			q0->rcb->page_count = dpage_count;
2948			dpage_idx += dpage_count;
2950			/* Call bnad to complete rcb setup */
2951			if (rx->rcb_setup_cbfn)
2952				rx->rcb_setup_cbfn(bnad, q0->rcb);
2958				q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2959				RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
2960					(void *)unmapq_mem[rcb_idx].kva);
2962				(q1)->buffer_size = (rx_cfg)->small_buff_size;
2963				(q1)->rx_packets = (q1)->rx_bytes = 0;
2964				(q1)->rx_packets_with_error =
2965					(q1)->rxbuf_alloc_failed = 0;
2967				_rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
2968					&hqpt_mem[i], &hsqpt_mem[i],
2969					&hpage_mem[hpage_idx]);
2970				q1->rcb->page_idx = hpage_idx;
2971				q1->rcb->page_count = hpage_count;
2972				hpage_idx += hpage_count;
2974				/* Call bnad to complete rcb setup */
2975				if (rx->rcb_setup_cbfn)
2976					rx->rcb_setup_cbfn(bnad, q1->rcb);
2979			rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2980			_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2981				&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
2982			rxp->cq.ccb->page_idx = cpage_idx;
2983			rxp->cq.ccb->page_count = page_count;
2984			cpage_idx += page_count;
2986			rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2987			rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2989			rxp->cq.ccb->producer_index = 0;
2990			rxp->cq.ccb->q_depth = rx_cfg->q_depth +
2991				((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2992				0 : rx_cfg->q_depth);
2993			rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
2994			rxp->cq.ccb->rcb[0] = q0->rcb;
2996				rxp->cq.ccb->rcb[1] = q1->rcb;
2997			rxp->cq.ccb->cq = &rxp->cq;
2998			rxp->cq.ccb->bnad = bna->bnad;
2999			rxp->cq.ccb->hw_producer_index =
3000				((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
3001				(rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
3002			*(rxp->cq.ccb->hw_producer_index) = 0;
3003			rxp->cq.ccb->intr_type = intr_info->intr_type;
3004			rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
3005				intr_info->idl[0].vector :
3006				intr_info->idl[i].vector;
3007			rxp->cq.ccb->rx_coalescing_timeo =
3008				rxp->cq.ib->ib_config.coalescing_timeo;
3009			rxp->cq.ccb->id = i;
3011			/* Call bnad to complete CCB setup */
3012			if (rx->ccb_setup_cbfn)
3013				rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
3015		} /* for each rx-path */
3017		bna_rxf_init(&rx->rxf, rx, rx_cfg);
3019		bfa_fsm_set_state(rx, bna_rx_sm_stopped);
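/*
 * Editorial sketch (not part of the driver): the hw_producer_index set up in
 * bna_rx_create() points into the interrupt block's host segment, where the
 * adapter DMA-writes the current producer index for the driver to poll.  The
 * address is "segment base + reserved slot * index size"; note that the Rx
 * path above performs the addition on a u32 pointer (so the offset is scaled
 * by sizeof(u32)), while bna_tx_create() later adds byte offsets.  A
 * hypothetical byte-based helper:
 */
#include <linux/types.h>

static volatile u32 *demo_shadow_index(void *ib_seg_host_addr_kva,
				       unsigned int seg_offset,
				       unsigned int idx_size)
{
	return (volatile u32 *)((volatile u8 *)ib_seg_host_addr_kva +
				seg_offset * idx_size);
}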
3025	bna_rx_destroy(struct bna_rx *rx)
3027		struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
3028		struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
3029		struct bna_rxq *q0 = NULL;
3030		struct bna_rxq *q1 = NULL;
3031		struct bna_rxp *rxp;
3032		struct list_head *qe;
3034		bna_rxf_uninit(&rx->rxf);
3036		while (!list_empty(&rx->rxp_q)) {
3037			bfa_q_deq(&rx->rxp_q, &rxp);
3038			GET_RXQS(rxp, q0, q1);
3039			/* Callback to bnad for destroying RCB */
3040			if (rx->rcb_destroy_cbfn)
3041				rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
3045			_put_free_rxq(rx_mod, q0);
3047				/* Callback to bnad for destroying RCB */
3048				if (rx->rcb_destroy_cbfn)
3049					rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
3053				_put_free_rxq(rx_mod, q1);
3055			rxp->rxq.slr.large = NULL;
3056			rxp->rxq.slr.small = NULL;
3058			if (rxp->cq.ib_seg_offset != 0xff)
3059				bna_ib_release_idx(rxp->cq.ib,
3060						rxp->cq.ib_seg_offset);
3061			bna_ib_put(ib_mod, rxp->cq.ib);
3064			/* Callback to bnad for destroying CCB */
3065			if (rx->ccb_destroy_cbfn)
3066				rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
3069			_put_free_rxp(rx_mod, rxp);
3072		list_for_each(qe, &rx_mod->rx_active_q) {
3073			if (qe == &rx->qe) {
3075				bfa_q_qe_init(&rx->qe);
3082		_put_free_rx(rx_mod, rx);
3086	bna_rx_enable(struct bna_rx *rx)
3088		if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
3091		rx->rx_flags |= BNA_RX_F_ENABLE;
3092		if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
3093			bfa_fsm_send_event(rx, RX_E_START);
3097	bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
3098			void (*cbfn)(void *, struct bna_rx *,
3099					enum bna_cb_status))
3101		if (type == BNA_SOFT_CLEANUP) {
3102			/* h/w should not be accessed. Treat we're stopped */
3103			(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
3105			rx->stop_cbfn = cbfn;
3106			rx->stop_cbarg = rx->bna->bnad;
3108			rx->rx_flags &= ~BNA_RX_F_ENABLE;
3110			bfa_fsm_send_event(rx, RX_E_STOP);
3117	#define call_tx_stop_cbfn(tx, status)\
3119		if ((tx)->stop_cbfn)\
3120			(tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
3121		(tx)->stop_cbfn = NULL;\
3122		(tx)->stop_cbarg = NULL;\
3125	#define call_tx_prio_change_cbfn(tx, status)\
3127		if ((tx)->prio_change_cbfn)\
3128			(tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
3129		(tx)->prio_change_cbfn = NULL;\
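/*
 * Editorial note (not part of the driver): both macros above implement a
 * one-shot completion callback; the saved function pointer is invoked and
 * then cleared so a later state transition cannot report the same completion
 * twice.
 */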
3132	static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
3133			enum bna_cb_status status);
3134	static void bna_tx_cb_txq_stopped(void *arg, int status);
3135	static void bna_tx_cb_stats_cleared(void *arg, int status);
3136	static void __bna_tx_stop(struct bna_tx *tx);
3137	static void __bna_tx_start(struct bna_tx *tx);
3138	static void __bna_txf_stat_clr(struct bna_tx *tx);
3144		TX_E_TXQ_STOPPED	= 4,
3145		TX_E_PRIO_CHANGE	= 5,
3146		TX_E_STAT_CLEARED	= 6,
3152		BNA_TX_TXQ_STOP_WAIT	= 3,
3153		BNA_TX_PRIO_STOP_WAIT	= 4,
3154		BNA_TX_STAT_CLR_WAIT	= 5,
3157	bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
3159	bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
3161	bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
3163	bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3165	bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
3168	static struct bfa_sm_table tx_sm_table[] = {
3169		{BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
3170		{BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
3171		{BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
3172		{BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
3173		{BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
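/*
 * Editorial sketch (not part of the driver): tx_sm_table pairs each state
 * handler function with an enum value so bna_tx_state_get() can translate the
 * current handler back into a reportable state via bfa_sm_to_state().  A
 * minimal model of that lookup, with hypothetical names:
 */
typedef void (*demo_sm_t)(void *obj, int event);

struct demo_sm_entry {
	demo_sm_t handler;
	int state;
};

static int demo_sm_to_state(const struct demo_sm_entry *tbl, int nentries,
			    demo_sm_t cur)
{
	int i;

	for (i = 0; i < nentries; i++)
		if (tbl[i].handler == cur)
			return tbl[i].state;
	return -1;	/* handler not found in the table */
}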
3177	bna_tx_sm_stopped_entry(struct bna_tx *tx)
3179		struct bna_txq *txq;
3180		struct list_head *qe;
3182		list_for_each(qe, &tx->txq_q) {
3183			txq = (struct bna_txq *)qe;
3184			(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3187		call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
3191	bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3195			bfa_fsm_set_state(tx, bna_tx_sm_started);
3199			bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3206		case TX_E_PRIO_CHANGE:
3207			call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3210		case TX_E_TXQ_STOPPED:
3212			 * This event is received due to flushing of mbox when
3219			bfa_sm_fault(tx->bna, event);
3224	bna_tx_sm_started_entry(struct bna_tx *tx)
3226		struct bna_txq *txq;
3227		struct list_head *qe;
3232		list_for_each(qe, &tx->txq_q) {
3233			txq = (struct bna_txq *)qe;
3234			bna_ib_ack(&txq->ib->door_bell, 0);
3239	bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3241		struct bna_txq *txq;
3242		struct list_head *qe;
3246			bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3251			list_for_each(qe, &tx->txq_q) {
3252				txq = (struct bna_txq *)qe;
3253				bna_ib_fail(txq->ib);
3254				(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3256			bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3259		case TX_E_PRIO_CHANGE:
3260			bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3264			bfa_sm_fault(tx->bna, event);
3269	bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
3274	bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3276		struct bna_txq *txq;
3277		struct list_head *qe;
3281			bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3284		case TX_E_TXQ_STOPPED:
3285			list_for_each(qe, &tx->txq_q) {
3286				txq = (struct bna_txq *)qe;
3287				bna_ib_stop(txq->ib);
3289			bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
3292		case TX_E_PRIO_CHANGE:
3297			bfa_sm_fault(tx->bna, event);
3302	bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3308	bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3310		struct bna_txq *txq;
3311		struct list_head *qe;
3315			bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
3319			call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
3320			bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3323		case TX_E_TXQ_STOPPED:
3324			list_for_each(qe, &tx->txq_q) {
3325				txq = (struct bna_txq *)qe;
3326				bna_ib_stop(txq->ib);
3327				(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
3329			call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
3330			bfa_fsm_set_state(tx, bna_tx_sm_started);
3333		case TX_E_PRIO_CHANGE:
3338			bfa_sm_fault(tx->bna, event);
3343	bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
3345		__bna_txf_stat_clr(tx);
3349	bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
3353		case TX_E_STAT_CLEARED:
3354			bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3358			bfa_sm_fault(tx->bna, event);
3363	__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
3365		struct bna_rxtx_q_mem *q_mem;
3366		struct bna_txq_mem txq_cfg;
3367		struct bna_txq_mem *txq_mem;
3368		struct bna_dma_addr cur_q_addr;
3370		void __iomem *base_addr;
3373		/* Fill out structure, to be subsequently written to hardware */
3374		txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
3375		txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
3376		cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
3377		txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
3378		txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
3380		txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
3382		txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
3383			(txq->qpt.page_size >> 2);
3384		txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
3385			((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
3387		txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
3388		txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
3389			(txq->priority & 0x7));
3390		txq_cfg.wvc_n_cquota_n_rquota =
3391			((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
3392			(BFI_TX_MAX_WRR_QUOTA & 0xfff));
3394		/* Setup the page and write to H/W */
3396		pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
3397				HQM_RXTX_Q_RAM_BASE_OFFSET);
3398		writel(pg_num, tx->bna->regs.page_addr);
3400		base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3401				HQM_RXTX_Q_RAM_BASE_OFFSET);
3402		q_mem = (struct bna_rxtx_q_mem *)0;
3403		txq_mem = &q_mem[txq->txq_id].txq;
3406		 * The following 4 lines, is a hack b'cos the H/W needs to read
3407		 * these DMA addresses as little endian
3410		off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
3411		writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
3413		off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
3414		writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
3416		off = (unsigned long)&txq_mem->cur_q_entry_lo;
3417		writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
3419		off = (unsigned long)&txq_mem->cur_q_entry_hi;
3420		writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
3422		off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
3423		writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
3425		off = (unsigned long)&txq_mem->entry_n_pg_size;
3426		writel(txq_cfg.entry_n_pg_size, base_addr + off);
3428		off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
3429		writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
3431		off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
3432		writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
3434		off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
3435		writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
3437		off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
3438		writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
3440		txq->tcb->producer_index = 0;
3441		txq->tcb->consumer_index = 0;
3442		*(txq->tcb->hw_consumer_index) = 0;
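/*
 * Editorial sketch (not part of the driver): __bna_txq_start() computes
 * register offsets by indexing an array based at address zero, which yields
 * the byte offset of a member inside the memory-mapped queue RAM (an
 * offsetof()-style idiom also used with the doorbell qset).  A hypothetical
 * illustration of the same arithmetic:
 */
#include <linux/types.h>

struct demo_q_regs {
	u32 pg_tbl_addr_lo;
	u32 pg_tbl_addr_hi;
	u32 cur_q_entry_lo;
	u32 cur_q_entry_hi;
};

static unsigned long demo_reg_offset(int q_id)
{
	struct demo_q_regs *q = (struct demo_q_regs *)0;

	/* byte offset of queue q_id's cur_q_entry_lo within the block */
	return (unsigned long)&q[q_id].cur_q_entry_lo;
}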
3447	__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
3449		struct bfi_ll_q_stop_req ll_req;
3450		u32 bit_mask[2] = {0, 0};
3451		if (txq->txq_id < 32)
3452			bit_mask[0] = (u32)1 << txq->txq_id;
3454			bit_mask[1] = (u32)1 << (txq->txq_id - 32);
3456		memset(&ll_req, 0, sizeof(ll_req));
3457		ll_req.mh.msg_class = BFI_MC_LL;
3458		ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
3459		ll_req.mh.mtag.h2i.lpu_id = 0;
3460		ll_req.q_id_mask[0] = htonl(bit_mask[0]);
3461		ll_req.q_id_mask[1] = htonl(bit_mask[1]);
3463		bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3464				bna_tx_cb_txq_stopped, tx);
3466		bna_mbox_send(tx->bna, &tx->mbox_qe);
3470	__bna_txf_start(struct bna_tx *tx)
3472		struct bna_tx_fndb_ram *tx_fndb;
3473		struct bna_txf *txf = &tx->txf;
3474		void __iomem *base_addr;
3477		writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3478			(tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
3479			tx->bna->regs.page_addr);
3481		base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3482				TX_FNDB_RAM_BASE_OFFSET);
3484		tx_fndb = (struct bna_tx_fndb_ram *)0;
3485		off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3487		writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
3490		if (tx->txf.txf_id < 32)
3491			tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
3493			tx->bna->tx_mod.txf_bmap[1] |= ((u32)
3494					1 << (tx->txf.txf_id - 32));
3498	__bna_txf_stop(struct bna_tx *tx)
3500		struct bna_tx_fndb_ram *tx_fndb;
3503		struct bna_txf *txf = &tx->txf;
3504		void __iomem *base_addr;
3507		/* retrieve the running txf_flags & turn off enable bit */
3508		page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
3509			(tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
3510		writel(page_num, tx->bna->regs.page_addr);
3512		base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
3513				TX_FNDB_RAM_BASE_OFFSET);
3514		tx_fndb = (struct bna_tx_fndb_ram *)0;
3515		off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
3517		ctl_flags = readl(base_addr + off);
3518		ctl_flags &= ~BFI_TXF_CF_ENABLE;
3520		writel(ctl_flags, base_addr + off);
3522		if (tx->txf.txf_id < 32)
3523			tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
3525			tx->bna->tx_mod.txf_bmap[0] &= ~((u32)
3526					1 << (tx->txf.txf_id - 32));
3530	__bna_txf_stat_clr(struct bna_tx *tx)
3532		struct bfi_ll_stats_req ll_req;
3533		u32 txf_bmap[2] = {0, 0};
3534		if (tx->txf.txf_id < 32)
3535			txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
3537			txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
3538		bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
3539		ll_req.stats_mask = 0;
3540		ll_req.rxf_id_mask[0] = 0;
3541		ll_req.rxf_id_mask[1] = 0;
3542		ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
3543		ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
3545		bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
3546				bna_tx_cb_stats_cleared, tx);
3547		bna_mbox_send(tx->bna, &tx->mbox_qe);
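/*
 * Editorial sketch (not part of the driver): Tx function ids can exceed 31,
 * so __bna_txf_start/_stop/_stat_clr keep a two-word bitmap and select the
 * word by id / 32.  An equivalent hypothetical helper:
 */
#include <linux/types.h>

static void demo_set_id_bit(u32 bmap[2], unsigned int id)
{
	bmap[id < 32 ? 0 : 1] |= (u32)1 << (id & 31);
}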
3551	__bna_tx_start(struct bna_tx *tx)
3553		struct bna_txq *txq;
3554		struct list_head *qe;
3556		list_for_each(qe, &tx->txq_q) {
3557			txq = (struct bna_txq *)qe;
3558			bna_ib_start(txq->ib);
3559			__bna_txq_start(tx, txq);
3562		__bna_txf_start(tx);
3564		list_for_each(qe, &tx->txq_q) {
3565			txq = (struct bna_txq *)qe;
3566			txq->tcb->priority = txq->priority;
3567			(tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
3572	__bna_tx_stop(struct bna_tx *tx)
3574		struct bna_txq *txq;
3575		struct list_head *qe;
3577		list_for_each(qe, &tx->txq_q) {
3578			txq = (struct bna_txq *)qe;
3579			(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
3584		list_for_each(qe, &tx->txq_q) {
3585			txq = (struct bna_txq *)qe;
3586			bfa_wc_up(&tx->txq_stop_wc);
3589		list_for_each(qe, &tx->txq_q) {
3590			txq = (struct bna_txq *)qe;
3591			__bna_txq_stop(tx, txq);
3596	bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3597			struct bna_mem_descr *qpt_mem,
3598			struct bna_mem_descr *swqpt_mem,
3599			struct bna_mem_descr *page_mem)
3603		txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3604		txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3605		txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3606		txq->qpt.page_count = page_count;
3607		txq->qpt.page_size = page_size;
3609		txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3611		for (i = 0; i < page_count; i++) {
3612			txq->tcb->sw_qpt[i] = page_mem[i].kva;
3614			((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3615				page_mem[i].dma.lsb;
3616			((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3617				page_mem[i].dma.msb;
3623	bna_tx_free(struct bna_tx *tx)
3625		struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3626		struct bna_txq *txq;
3627		struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
3628		struct list_head *qe;
3630		while (!list_empty(&tx->txq_q)) {
3631			bfa_q_deq(&tx->txq_q, &txq);
3632			bfa_q_qe_init(&txq->qe);
3634			if (txq->ib_seg_offset != -1)
3635				bna_ib_release_idx(txq->ib,
3636						txq->ib_seg_offset);
3637			bna_ib_put(ib_mod, txq->ib);
3642			list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3645		list_for_each(qe, &tx_mod->tx_active_q) {
3646			if (qe == &tx->qe) {
3648				bfa_q_qe_init(&tx->qe);
3655		list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3659	bna_tx_cb_txq_stopped(void *arg, int status)
3661		struct bna_tx *tx = (struct bna_tx *)arg;
3663		bfa_q_qe_init(&tx->mbox_qe.qe);
3664		bfa_wc_down(&tx->txq_stop_wc);
3668	bna_tx_cb_txq_stopped_all(void *arg)
3670		struct bna_tx *tx = (struct bna_tx *)arg;
3672		bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
3676	bna_tx_cb_stats_cleared(void *arg, int status)
3678		struct bna_tx *tx = (struct bna_tx *)arg;
3680		bfa_q_qe_init(&tx->mbox_qe.qe);
3682		bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
3686	bna_tx_start(struct bna_tx *tx)
3688		tx->flags |= BNA_TX_F_PORT_STARTED;
3689		if (tx->flags & BNA_TX_F_ENABLED)
3690			bfa_fsm_send_event(tx, TX_E_START);
3694	bna_tx_stop(struct bna_tx *tx)
3696		tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3697		tx->stop_cbarg = &tx->bna->tx_mod;
3699		tx->flags &= ~BNA_TX_F_PORT_STARTED;
3700		bfa_fsm_send_event(tx, TX_E_STOP);
3704	bna_tx_fail(struct bna_tx *tx)
3706		tx->flags &= ~BNA_TX_F_PORT_STARTED;
3707		bfa_fsm_send_event(tx, TX_E_FAIL);
3711	bna_tx_prio_changed(struct bna_tx *tx, int prio)
3713		struct bna_txq *txq;
3714		struct list_head *qe;
3716		list_for_each(qe, &tx->txq_q) {
3717			txq = (struct bna_txq *)qe;
3718			txq->priority = prio;
3721		bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
3725	bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
3728			tx->flags |= BNA_TX_F_PRIO_LOCK;
3730			tx->flags &= ~BNA_TX_F_PRIO_LOCK;
3734	bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
3735			enum bna_cb_status status)
3737		struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3739		bfa_wc_down(&tx_mod->tx_stop_wc);
3743	bna_tx_mod_cb_tx_stopped_all(void *arg)
3745		struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3747		if (tx_mod->stop_cbfn)
3748			tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
3749		tx_mod->stop_cbfn = NULL;
3753	bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3757		struct bna_mem_info *mem_info;
3759		res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3760		mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3761		mem_info->mem_type = BNA_MEM_T_KVA;
3762		mem_info->len = sizeof(struct bna_tcb);
3763		mem_info->num = num_txq;
3765		q_size = txq_depth * BFI_TXQ_WI_SIZE;
3766		q_size = ALIGN(q_size, PAGE_SIZE);
3767		page_count = q_size >> PAGE_SHIFT;
3769		res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3770		mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3771		mem_info->mem_type = BNA_MEM_T_DMA;
3772		mem_info->len = page_count * sizeof(struct bna_dma_addr);
3773		mem_info->num = num_txq;
3775		res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3776		mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3777		mem_info->mem_type = BNA_MEM_T_KVA;
3778		mem_info->len = page_count * sizeof(void *);
3779		mem_info->num = num_txq;
3781		res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3782		mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3783		mem_info->mem_type = BNA_MEM_T_DMA;
3784		mem_info->len = PAGE_SIZE;
3785		mem_info->num = num_txq * page_count;
3787		res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3788		res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3790		res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3794	bna_tx_create(struct bna *bna, struct bnad *bnad,
3795			struct bna_tx_config *tx_cfg,
3796			struct bna_tx_event_cbfn *tx_cbfn,
3797			struct bna_res_info *res_info, void *priv)
3799		struct bna_intr_info *intr_info;
3800		struct bna_tx_mod *tx_mod = &bna->tx_mod;
3802		struct bna_txq *txq;
3803		struct list_head *qe;
3804		struct bna_ib_mod *ib_mod = &bna->ib_mod;
3805		struct bna_doorbell_qset *qset;
3806		struct bna_ib_config ib_config;
3813		intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3814		page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
3816		page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
3822		if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3827		if (list_empty(&tx_mod->tx_free_q))
3829		bfa_q_deq(&tx_mod->tx_free_q, &tx);
3830		bfa_q_qe_init(&tx->qe);
3834		INIT_LIST_HEAD(&tx->txq_q);
3835		for (i = 0; i < tx_cfg->num_txq; i++) {
3836			if (list_empty(&tx_mod->txq_free_q))
3839			bfa_q_deq(&tx_mod->txq_free_q, &txq);
3840			bfa_q_qe_init(&txq->qe);
3841			list_add_tail(&txq->qe, &tx->txq_q);
3843			txq->ib_seg_offset = -1;
3849		list_for_each(qe, &tx->txq_q) {
3850			txq = (struct bna_txq *)qe;
3852			if (intr_info->num == 1)
3853				txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
3854							intr_info->idl[0].vector);
3856				txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
3857							intr_info->idl[i].vector);
3859			if (txq->ib == NULL)
3862			txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
3863			if (txq->ib_seg_offset == -1)
3875		tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3876		tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3877		/* Following callbacks are mandatory */
3878		tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3879		tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3880		tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3882		list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3885		tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
3886		tx->txq_stop_wc.wc_cbarg = tx;
3887		tx->txq_stop_wc.wc_count = 0;
3889		tx->type = tx_cfg->tx_type;
3892		if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
3894			case BNA_TX_T_REGULAR:
3895				if (!(tx->bna->tx_mod.flags &
3896					BNA_TX_MOD_F_PORT_LOOPBACK))
3897					tx->flags |= BNA_TX_F_PORT_STARTED;
3899			case BNA_TX_T_LOOPBACK:
3900				if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
3901					tx->flags |= BNA_TX_F_PORT_STARTED;
3905		if (tx->bna->tx_mod.cee_link)
3906			tx->flags |= BNA_TX_F_PRIO_LOCK;
3912		list_for_each(qe, &tx->txq_q) {
3913			txq = (struct bna_txq *)qe;
3914			txq->priority = tx_mod->priority;
3915			txq->tcb = (struct bna_tcb *)
3916				res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3917			txq->tx_packets = 0;
3922			ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3923			ib_config.interpkt_timeo = 0; /* Not used */
3924			ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
3925			ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
3926						BFI_IB_CF_INT_ENABLE |
3927						BFI_IB_CF_COALESCING_MODE);
3928			bna_ib_config(txq->ib, &ib_config);
3932			txq->tcb->producer_index = 0;
3933			txq->tcb->consumer_index = 0;
3934			txq->tcb->hw_consumer_index = (volatile u32 *)
3935				((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
3936				(txq->ib_seg_offset * BFI_IBIDX_SIZE));
3937			*(txq->tcb->hw_consumer_index) = 0;
3938			txq->tcb->q_depth = tx_cfg->txq_depth;
3939			txq->tcb->unmap_q = (void *)
3940				res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3941			qset = (struct bna_doorbell_qset *)0;
3942			off = (unsigned long)&qset[txq->txq_id].txq[0];
3943			txq->tcb->q_dbell = off +
3944				BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
3945			txq->tcb->i_dbell = &txq->ib->door_bell;
3946			txq->tcb->intr_type = intr_info->intr_type;
3947			txq->tcb->intr_vector = (intr_info->num == 1) ?
3948				intr_info->idl[0].vector :
3949				intr_info->idl[i].vector;
3950			txq->tcb->txq = txq;
3951			txq->tcb->bnad = bnad;
3954			/* QPT, SWQPT, Pages */
3955			bna_txq_qpt_setup(txq, page_count, page_size,
3956				&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3957				&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3958				&res_info[BNA_TX_RES_MEM_T_PAGE].
3959					res_u.mem_info.mdl[page_idx]);
3960			txq->tcb->page_idx = page_idx;
3961			txq->tcb->page_count = page_count;
3962			page_idx += page_count;
3964			/* Callback to bnad for setting up TCB */
3965			if (tx->tcb_setup_cbfn)
3966				(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3973		tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
3977		bfa_q_qe_init(&tx->mbox_qe.qe);
3979		bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3989	bna_tx_destroy(struct bna_tx *tx)
3991		/* Callback to bnad for destroying TCB */
3992		if (tx->tcb_destroy_cbfn) {
3993			struct bna_txq *txq;
3994			struct list_head *qe;
3996			list_for_each(qe, &tx->txq_q) {
3997				txq = (struct bna_txq *)qe;
3998				(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
4006	bna_tx_enable(struct bna_tx *tx)
4008		if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
4011		tx->flags |= BNA_TX_F_ENABLED;
4013		if (tx->flags & BNA_TX_F_PORT_STARTED)
4014			bfa_fsm_send_event(tx, TX_E_START);
4018	bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
4019			void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
4021		if (type == BNA_SOFT_CLEANUP) {
4022			(*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
4026		tx->stop_cbfn = cbfn;
4027		tx->stop_cbarg = tx->bna->bnad;
4029		tx->flags &= ~BNA_TX_F_ENABLED;
4031		bfa_fsm_send_event(tx, TX_E_STOP);
4035	bna_tx_state_get(struct bna_tx *tx)
4037		return bfa_sm_to_state(tx_sm_table, tx->fsm);
4041	bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
4042			struct bna_res_info *res_info)
4049		tx_mod->tx = (struct bna_tx *)
4050			res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
4051		tx_mod->txq = (struct bna_txq *)
4052			res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
4054		INIT_LIST_HEAD(&tx_mod->tx_free_q);
4055		INIT_LIST_HEAD(&tx_mod->tx_active_q);
4057		INIT_LIST_HEAD(&tx_mod->txq_free_q);
4059		for (i = 0; i < BFI_MAX_TXQ; i++) {
4060			tx_mod->tx[i].txf.txf_id = i;
4061			bfa_q_qe_init(&tx_mod->tx[i].qe);
4062			list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
4064			tx_mod->txq[i].txq_id = i;
4065			bfa_q_qe_init(&tx_mod->txq[i].qe);
4066			list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
4069		tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
4070		tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
4071		tx_mod->tx_stop_wc.wc_count = 0;
4075	bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
4077		struct list_head *qe;
4081		list_for_each(qe, &tx_mod->tx_free_q)
4085		list_for_each(qe, &tx_mod->txq_free_q)
4092	bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
4095		struct list_head *qe;
4097		tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
4098		if (type == BNA_TX_T_LOOPBACK)
4099			tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;
4101		list_for_each(qe, &tx_mod->tx_active_q) {
4102			tx = (struct bna_tx *)qe;
4103			if (tx->type == type)
4109	bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
4112		struct list_head *qe;
4114		tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
4115		tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
4117		tx_mod->stop_cbfn = bna_port_cb_tx_stopped;
4120		 * Before calling bna_tx_stop(), increment tx_stop_wc as many times
4121		 * as we are going to call bna_tx_stop
4123		list_for_each(qe, &tx_mod->tx_active_q) {
4124			tx = (struct bna_tx *)qe;
4125			if (tx->type == type)
4126				bfa_wc_up(&tx_mod->tx_stop_wc);
4129		if (tx_mod->tx_stop_wc.wc_count == 0) {
4130			tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
4131			tx_mod->stop_cbfn = NULL;
4135		list_for_each(qe, &tx_mod->tx_active_q) {
4136			tx = (struct bna_tx *)qe;
4137			if (tx->type == type)
4143	bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
4146		struct list_head *qe;
4148		tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
4149		tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
4151		list_for_each(qe, &tx_mod->tx_active_q) {
4152			tx = (struct bna_tx *)qe;
4158	bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
4161		struct list_head *qe;
4163		if (prio != tx_mod->priority) {
4164			tx_mod->priority = prio;
4166			list_for_each(qe, &tx_mod->tx_active_q) {
4167				tx = (struct bna_tx *)qe;
4168				bna_tx_prio_changed(tx, prio);
4174	bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
4177		struct list_head *qe;
4179		tx_mod->cee_link = cee_link;
4181		list_for_each(qe, &tx_mod->tx_active_q) {
4182			tx = (struct bna_tx *)qe;
4183			bna_tx_cee_link_status(tx, cee_link
);