1 /*
2  * Linux network driver for Brocade Converged Network Adapter.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License (GPL) Version 2 as
6  * published by the Free Software Foundation
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13 /*
14 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18 #include "bna.h"
19 #include "bfi.h"
21 /* IB */
22 static void
23 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
25 ib->coalescing_timeo = coalescing_timeo;
26 ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
27 (u32)ib->coalescing_timeo, 0);
30 /* RXF */
32 #define bna_rxf_vlan_cfg_soft_reset(rxf) \
33 do { \
34 (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \
35 (rxf)->vlan_strip_pending = true; \
36 } while (0)
38 #define bna_rxf_rss_cfg_soft_reset(rxf) \
39 do { \
40 if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \
41 (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \
42 BNA_RSS_F_CFG_PENDING | \
43 BNA_RSS_F_STATUS_PENDING); \
44 } while (0)
46 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
47 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
48 static int bna_rxf_fltr_clear(struct bna_rxf *rxf);
49 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
50 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
51 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
52 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
53 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
54 enum bna_cleanup_type cleanup);
55 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
56 enum bna_cleanup_type cleanup);
57 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
58 enum bna_cleanup_type cleanup);
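/*
 * RXF (receive function) state machine.  Configuration requests are
 * applied from cfg_wait one firmware request at a time; RXF_E_FW_RESP
 * either kicks off the next pending item or, when nothing is left,
 * moves to started.  fltr_clr_wait clears CAM filters on pause, and
 * last_resp_wait absorbs the final firmware response after a stop.
 */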
60 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
61 enum bna_rxf_event);
62 bfa_fsm_state_decl(bna_rxf, paused, struct bna_rxf,
63 enum bna_rxf_event);
64 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
65 enum bna_rxf_event);
66 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
67 enum bna_rxf_event);
68 bfa_fsm_state_decl(bna_rxf, fltr_clr_wait, struct bna_rxf,
69 enum bna_rxf_event);
70 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
71 enum bna_rxf_event);
73 static void
74 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
76 call_rxf_stop_cbfn(rxf);
79 static void
80 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
82 switch (event) {
83 case RXF_E_START:
84 if (rxf->flags & BNA_RXF_F_PAUSED) {
85 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
86 call_rxf_start_cbfn(rxf);
87 } else
88 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
89 break;
91 case RXF_E_STOP:
92 call_rxf_stop_cbfn(rxf);
93 break;
95 case RXF_E_FAIL:
96 /* No-op */
97 break;
99 case RXF_E_CONFIG:
100 call_rxf_cam_fltr_cbfn(rxf);
101 break;
103 case RXF_E_PAUSE:
104 rxf->flags |= BNA_RXF_F_PAUSED;
105 call_rxf_pause_cbfn(rxf);
106 break;
108 case RXF_E_RESUME:
109 rxf->flags &= ~BNA_RXF_F_PAUSED;
110 call_rxf_resume_cbfn(rxf);
111 break;
113 default:
114 bfa_sm_fault(event);
118 static void
119 bna_rxf_sm_paused_entry(struct bna_rxf *rxf)
121 call_rxf_pause_cbfn(rxf);
124 static void
125 bna_rxf_sm_paused(struct bna_rxf *rxf, enum bna_rxf_event event)
127 switch (event) {
128 case RXF_E_STOP:
129 case RXF_E_FAIL:
130 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
131 break;
133 case RXF_E_CONFIG:
134 call_rxf_cam_fltr_cbfn(rxf);
135 break;
137 case RXF_E_RESUME:
138 rxf->flags &= ~BNA_RXF_F_PAUSED;
139 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
140 break;
142 default:
143 bfa_sm_fault(event);
147 static void
148 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
150 if (!bna_rxf_cfg_apply(rxf)) {
151 /* No more pending config updates */
152 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
156 static void
157 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
159 switch (event) {
160 case RXF_E_STOP:
161 bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
162 break;
164 case RXF_E_FAIL:
165 bna_rxf_cfg_reset(rxf);
166 call_rxf_start_cbfn(rxf);
167 call_rxf_cam_fltr_cbfn(rxf);
168 call_rxf_resume_cbfn(rxf);
169 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
170 break;
172 case RXF_E_CONFIG:
173 /* No-op */
174 break;
176 case RXF_E_PAUSE:
177 rxf->flags |= BNA_RXF_F_PAUSED;
178 call_rxf_start_cbfn(rxf);
179 bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
180 break;
182 case RXF_E_FW_RESP:
183 if (!bna_rxf_cfg_apply(rxf)) {
184 /* No more pending config updates */
185 bfa_fsm_set_state(rxf, bna_rxf_sm_started);
187 break;
189 default:
190 bfa_sm_fault(event);
194 static void
195 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
197 call_rxf_start_cbfn(rxf);
198 call_rxf_cam_fltr_cbfn(rxf);
199 call_rxf_resume_cbfn(rxf);
202 static void
203 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
205 switch (event) {
206 case RXF_E_STOP:
207 case RXF_E_FAIL:
208 bna_rxf_cfg_reset(rxf);
209 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
210 break;
212 case RXF_E_CONFIG:
213 bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
214 break;
216 case RXF_E_PAUSE:
217 rxf->flags |= BNA_RXF_F_PAUSED;
218 if (!bna_rxf_fltr_clear(rxf))
219 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
220 else
221 bfa_fsm_set_state(rxf, bna_rxf_sm_fltr_clr_wait);
222 break;
224 default:
225 bfa_sm_fault(event);
229 static void
230 bna_rxf_sm_fltr_clr_wait_entry(struct bna_rxf *rxf)
234 static void
235 bna_rxf_sm_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
237 switch (event) {
238 case RXF_E_FAIL:
239 bna_rxf_cfg_reset(rxf);
240 call_rxf_pause_cbfn(rxf);
241 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
242 break;
244 case RXF_E_FW_RESP:
245 if (!bna_rxf_fltr_clear(rxf)) {
246 /* No more pending CAM entries to clear */
247 bfa_fsm_set_state(rxf, bna_rxf_sm_paused);
249 break;
251 default:
252 bfa_sm_fault(event);
256 static void
257 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
261 static void
262 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
264 switch (event) {
265 case RXF_E_FAIL:
266 case RXF_E_FW_RESP:
267 bna_rxf_cfg_reset(rxf);
268 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
269 break;
271 default:
272 bfa_sm_fault(event);
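/*
 * The bna_bfi_*() helpers below build BFI_ENET host-to-firmware
 * requests (message header, entry count, payload) and post them on the
 * msgq.  The corresponding firmware response is fed back into the RXF
 * state machine as RXF_E_FW_RESP (see bna_bfi_rxf_cfg_rsp() and
 * friends), which is what advances the cfg_wait state.
 */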
276 static void
277 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
278 enum bfi_enet_h2i_msgs req_type)
280 struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
282 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
283 req->mh.num_entries = htons(
284 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
285 memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
286 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
287 sizeof(struct bfi_enet_ucast_req), &req->mh);
288 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
291 static void
292 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
294 struct bfi_enet_mcast_add_req *req =
295 &rxf->bfi_enet_cmd.mcast_add_req;
297 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
298 0, rxf->rx->rid);
299 req->mh.num_entries = htons(
300 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
301 memcpy(&req->mac_addr, &mac->addr, sizeof(mac_t));
302 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
303 sizeof(struct bfi_enet_mcast_add_req), &req->mh);
304 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
307 static void
308 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
310 struct bfi_enet_mcast_del_req *req =
311 &rxf->bfi_enet_cmd.mcast_del_req;
313 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
314 0, rxf->rx->rid);
315 req->mh.num_entries = htons(
316 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
317 req->handle = htons(handle);
318 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
319 sizeof(struct bfi_enet_mcast_del_req), &req->mh);
320 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
323 static void
324 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
326 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
328 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
329 BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
330 req->mh.num_entries = htons(
331 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
332 req->enable = status;
333 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
334 sizeof(struct bfi_enet_enable_req), &req->mh);
335 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
338 static void
339 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
341 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
343 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
344 BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
345 req->mh.num_entries = htons(
346 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
347 req->enable = status;
348 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
349 sizeof(struct bfi_enet_enable_req), &req->mh);
350 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
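/*
 * The VLAN filter table is written to firmware one
 * BFI_ENET_VLAN_BLOCK_SIZE-bit block at a time.  When VLAN filtering
 * is disabled the block is sent with every bit set, so the filter
 * effectively passes every VLAN id.
 */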
353 static void
354 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
356 struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
357 int i;
358 int j;
360 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
361 BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
362 req->mh.num_entries = htons(
363 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
364 req->block_idx = block_idx;
365 for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
366 j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
367 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
368 req->bit_mask[i] =
369 htonl(rxf->vlan_filter_table[j]);
370 else
371 req->bit_mask[i] = 0xFFFFFFFF;
373 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
374 sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
375 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
378 static void
379 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
381 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
383 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
384 BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
385 req->mh.num_entries = htons(
386 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
387 req->enable = rxf->vlan_strip_status;
388 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
389 sizeof(struct bfi_enet_enable_req), &req->mh);
390 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
393 static void
394 bna_bfi_rit_cfg(struct bna_rxf *rxf)
396 struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
398 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
399 BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
400 req->mh.num_entries = htons(
401 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
402 req->size = htons(rxf->rit_size);
403 memcpy(&req->table[0], rxf->rit, rxf->rit_size);
404 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
405 sizeof(struct bfi_enet_rit_req), &req->mh);
406 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
409 static void
410 bna_bfi_rss_cfg(struct bna_rxf *rxf)
412 struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
413 int i;
415 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
416 BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
417 req->mh.num_entries = htons(
418 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
419 req->cfg.type = rxf->rss_cfg.hash_type;
420 req->cfg.mask = rxf->rss_cfg.hash_mask;
421 for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
422 req->cfg.key[i] =
423 htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
424 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
425 sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
426 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
429 static void
430 bna_bfi_rss_enable(struct bna_rxf *rxf)
432 struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
434 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
435 BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
436 req->mh.num_entries = htons(
437 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
438 req->enable = rxf->rss_status;
439 bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
440 sizeof(struct bfi_enet_enable_req), &req->mh);
441 bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
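/*
 * Multicast CAM handle bookkeeping: firmware returns a handle for each
 * multicast address it adds (see bna_bfi_rxf_mcast_add_rsp()).  A
 * handle may be shared by several MACs, so it is reference counted and
 * a delete request is only issued once the last MAC using it is
 * removed.
 */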
444 /* This function gets the multicast MAC that has already been added to CAM */
445 static struct bna_mac *
446 bna_rxf_mcmac_get(struct bna_rxf *rxf, u8 *mac_addr)
448 struct bna_mac *mac;
449 struct list_head *qe;
451 list_for_each(qe, &rxf->mcast_active_q) {
452 mac = (struct bna_mac *)qe;
453 if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
454 return mac;
457 list_for_each(qe, &rxf->mcast_pending_del_q) {
458 mac = (struct bna_mac *)qe;
459 if (BNA_MAC_IS_EQUAL(&mac->addr, mac_addr))
460 return mac;
463 return NULL;
466 static struct bna_mcam_handle *
467 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
469 struct bna_mcam_handle *mchandle;
470 struct list_head *qe;
472 list_for_each(qe, &rxf->mcast_handle_q) {
473 mchandle = (struct bna_mcam_handle *)qe;
474 if (mchandle->handle == handle)
475 return mchandle;
478 return NULL;
481 static void
482 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
484 struct bna_mac *mcmac;
485 struct bna_mcam_handle *mchandle;
487 mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
488 mchandle = bna_rxf_mchandle_get(rxf, handle);
489 if (mchandle == NULL) {
490 mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
491 mchandle->handle = handle;
492 mchandle->refcnt = 0;
493 list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
495 mchandle->refcnt++;
496 mcmac->handle = mchandle;
499 static int
500 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
501 enum bna_cleanup_type cleanup)
503 struct bna_mcam_handle *mchandle;
504 int ret = 0;
506 mchandle = mac->handle;
507 if (mchandle == NULL)
508 return ret;
510 mchandle->refcnt--;
511 if (mchandle->refcnt == 0) {
512 if (cleanup == BNA_HARD_CLEANUP) {
513 bna_bfi_mcast_del_req(rxf, mchandle->handle);
514 ret = 1;
516 list_del(&mchandle->qe);
517 bfa_q_qe_init(&mchandle->qe);
518 bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
520 mac->handle = NULL;
522 return ret;
525 static int
526 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
528 struct bna_mac *mac = NULL;
529 struct list_head *qe;
530 int ret;
532 /* First delete multicast entries to maintain the count */
533 while (!list_empty(&rxf->mcast_pending_del_q)) {
534 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
535 bfa_q_qe_init(qe);
536 mac = (struct bna_mac *)qe;
537 ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
538 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
539 if (ret)
540 return ret;
543 /* Add multicast entries */
544 if (!list_empty(&rxf->mcast_pending_add_q)) {
545 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
546 bfa_q_qe_init(qe);
547 mac = (struct bna_mac *)qe;
548 list_add_tail(&mac->qe, &rxf->mcast_active_q);
549 bna_bfi_mcast_add_req(rxf, mac);
550 return 1;
553 return 0;
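/*
 * vlan_pending_bitmask has one bit per VLAN filter block that still
 * needs to be written to firmware.  Each pass picks the lowest set
 * bit, clears it and posts that block, so a full table update takes
 * one firmware round trip per block.
 */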
556 static int
557 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
559 u8 vlan_pending_bitmask;
560 int block_idx = 0;
562 if (rxf->vlan_pending_bitmask) {
563 vlan_pending_bitmask = rxf->vlan_pending_bitmask;
564 while (!(vlan_pending_bitmask & 0x1)) {
565 block_idx++;
566 vlan_pending_bitmask >>= 1;
568 rxf->vlan_pending_bitmask &= ~(1 << block_idx);
569 bna_bfi_rx_vlan_filter_set(rxf, block_idx);
570 return 1;
573 return 0;
576 static int
577 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
579 struct list_head *qe;
580 struct bna_mac *mac;
581 int ret;
583 /* Throw away mcast entries pending deletion */
584 while (!list_empty(&rxf->mcast_pending_del_q)) {
585 bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
586 bfa_q_qe_init(qe);
587 mac = (struct bna_mac *)qe;
588 ret = bna_rxf_mcast_del(rxf, mac, cleanup);
589 bna_cam_mod_mac_put(bna_mcam_mod_del_q(rxf->rx->bna), mac);
590 if (ret)
591 return ret;
594 /* Move active mcast entries to pending_add_q */
595 while (!list_empty(&rxf->mcast_active_q)) {
596 bfa_q_deq(&rxf->mcast_active_q, &qe);
597 bfa_q_qe_init(qe);
598 list_add_tail(qe, &rxf->mcast_pending_add_q);
599 mac = (struct bna_mac *)qe;
600 if (bna_rxf_mcast_del(rxf, mac, cleanup))
601 return 1;
604 return 0;
607 static int
608 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
610 if (rxf->rss_pending) {
611 if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
612 rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
613 bna_bfi_rit_cfg(rxf);
614 return 1;
617 if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
618 rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
619 bna_bfi_rss_cfg(rxf);
620 return 1;
623 if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
624 rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
625 bna_bfi_rss_enable(rxf);
626 return 1;
630 return 0;
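/*
 * Each *_cfg_apply() routine posts at most one firmware request and
 * returns 1 if it did so, or 0 if that sub-configuration has nothing
 * pending.  bna_rxf_cfg_apply() therefore issues a single request per
 * invocation, in a fixed order (ucast, mcast, promisc, allmulti, VLAN,
 * VLAN strip, RSS), and the FSM stays in cfg_wait until it returns 0.
 */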
633 static int
634 bna_rxf_cfg_apply(struct bna_rxf *rxf)
636 if (bna_rxf_ucast_cfg_apply(rxf))
637 return 1;
639 if (bna_rxf_mcast_cfg_apply(rxf))
640 return 1;
642 if (bna_rxf_promisc_cfg_apply(rxf))
643 return 1;
645 if (bna_rxf_allmulti_cfg_apply(rxf))
646 return 1;
648 if (bna_rxf_vlan_cfg_apply(rxf))
649 return 1;
651 if (bna_rxf_vlan_strip_cfg_apply(rxf))
652 return 1;
654 if (bna_rxf_rss_cfg_apply(rxf))
655 return 1;
657 return 0;
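/*
 * BNA_HARD_CLEANUP posts delete/disable requests to firmware while
 * rewinding the driver-side filter state; BNA_SOFT_CLEANUP only
 * rewinds the driver state.  bna_rxf_fltr_clear() (pause path) uses
 * hard cleanup, bna_rxf_cfg_reset() (failure and stop paths) uses soft
 * cleanup.
 */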
660 /* Only software reset */
661 static int
662 bna_rxf_fltr_clear(struct bna_rxf *rxf)
664 if (bna_rxf_ucast_cfg_reset(rxf, BNA_HARD_CLEANUP))
665 return 1;
667 if (bna_rxf_mcast_cfg_reset(rxf, BNA_HARD_CLEANUP))
668 return 1;
670 if (bna_rxf_promisc_cfg_reset(rxf, BNA_HARD_CLEANUP))
671 return 1;
673 if (bna_rxf_allmulti_cfg_reset(rxf, BNA_HARD_CLEANUP))
674 return 1;
676 return 0;
679 static void
680 bna_rxf_cfg_reset(struct bna_rxf *rxf)
682 bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
683 bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
684 bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
685 bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
686 bna_rxf_vlan_cfg_soft_reset(rxf);
687 bna_rxf_rss_cfg_soft_reset(rxf);
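/*
 * bna_rit_init() fills the RSS indirection table with the id of each
 * rx-path's CQ (taken from its ccb), one entry per configured path;
 * the table is pushed to firmware by bna_bfi_rit_cfg() when
 * BNA_RSS_F_RIT_PENDING is set.
 */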
690 static void
691 bna_rit_init(struct bna_rxf *rxf, int rit_size)
693 struct bna_rx *rx = rxf->rx;
694 struct bna_rxp *rxp;
695 struct list_head *qe;
696 int offset = 0;
698 rxf->rit_size = rit_size;
699 list_for_each(qe, &rx->rxp_q) {
700 rxp = (struct bna_rxp *)qe;
701 rxf->rit[offset] = rxp->cq.ccb->id;
702 offset++;
707 void
708 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
710 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
713 void
714 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
715 struct bfi_msgq_mhdr *msghdr)
717 struct bfi_enet_rsp *rsp =
718 (struct bfi_enet_rsp *)msghdr;
720 if (rsp->error) {
721 /* Clear ucast from cache */
722 rxf->ucast_active_set = 0;
725 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
728 void
729 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
730 struct bfi_msgq_mhdr *msghdr)
732 struct bfi_enet_mcast_add_req *req =
733 &rxf->bfi_enet_cmd.mcast_add_req;
734 struct bfi_enet_mcast_add_rsp *rsp =
735 (struct bfi_enet_mcast_add_rsp *)msghdr;
737 bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
738 ntohs(rsp->handle));
739 bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
742 static void
743 bna_rxf_init(struct bna_rxf *rxf,
744 struct bna_rx *rx,
745 struct bna_rx_config *q_config,
746 struct bna_res_info *res_info)
748 rxf->rx = rx;
750 INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
751 INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
752 rxf->ucast_pending_set = 0;
753 rxf->ucast_active_set = 0;
754 INIT_LIST_HEAD(&rxf->ucast_active_q);
755 rxf->ucast_pending_mac = NULL;
757 INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
758 INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
759 INIT_LIST_HEAD(&rxf->mcast_active_q);
760 INIT_LIST_HEAD(&rxf->mcast_handle_q);
762 if (q_config->paused)
763 rxf->flags |= BNA_RXF_F_PAUSED;
765 rxf->rit = (u8 *)
766 res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
767 bna_rit_init(rxf, q_config->num_paths);
769 rxf->rss_status = q_config->rss_status;
770 if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
771 rxf->rss_cfg = q_config->rss_config;
772 rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
773 rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
774 rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
777 rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
778 memset(rxf->vlan_filter_table, 0,
779 (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
780 rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
781 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
783 rxf->vlan_strip_status = q_config->vlan_strip_status;
785 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
788 static void
789 bna_rxf_uninit(struct bna_rxf *rxf)
791 struct bna_mac *mac;
793 rxf->ucast_pending_set = 0;
794 rxf->ucast_active_set = 0;
796 while (!list_empty(&rxf->ucast_pending_add_q)) {
797 bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
798 bfa_q_qe_init(&mac->qe);
799 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna), mac);
802 if (rxf->ucast_pending_mac) {
803 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
804 bna_cam_mod_mac_put(bna_ucam_mod_free_q(rxf->rx->bna),
805 rxf->ucast_pending_mac);
806 rxf->ucast_pending_mac = NULL;
809 while (!list_empty(&rxf->mcast_pending_add_q)) {
810 bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
811 bfa_q_qe_init(&mac->qe);
812 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
815 rxf->rxmode_pending = 0;
816 rxf->rxmode_pending_bitmask = 0;
817 if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
818 rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
819 if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
820 rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
822 rxf->rss_pending = 0;
823 rxf->vlan_strip_pending = false;
825 rxf->flags = 0;
827 rxf->rx = NULL;
830 static void
831 bna_rx_cb_rxf_started(struct bna_rx *rx)
833 bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
836 static void
837 bna_rxf_start(struct bna_rxf *rxf)
839 rxf->start_cbfn = bna_rx_cb_rxf_started;
840 rxf->start_cbarg = rxf->rx;
841 bfa_fsm_send_event(rxf, RXF_E_START);
844 static void
845 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
847 bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
850 static void
851 bna_rxf_stop(struct bna_rxf *rxf)
853 rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
854 rxf->stop_cbarg = rxf->rx;
855 bfa_fsm_send_event(rxf, RXF_E_STOP);
858 static void
859 bna_rxf_fail(struct bna_rxf *rxf)
861 bfa_fsm_send_event(rxf, RXF_E_FAIL);
864 enum bna_cb_status
865 bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
866 void (*cbfn)(struct bnad *, struct bna_rx *))
868 struct bna_rxf *rxf = &rx->rxf;
870 if (rxf->ucast_pending_mac == NULL) {
871 rxf->ucast_pending_mac =
872 bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
873 if (rxf->ucast_pending_mac == NULL)
874 return BNA_CB_UCAST_CAM_FULL;
875 bfa_q_qe_init(&rxf->ucast_pending_mac->qe);
878 memcpy(rxf->ucast_pending_mac->addr, ucmac, ETH_ALEN);
879 rxf->ucast_pending_set = 1;
880 rxf->cam_fltr_cbfn = cbfn;
881 rxf->cam_fltr_cbarg = rx->bna->bnad;
883 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
885 return BNA_CB_SUCCESS;
888 enum bna_cb_status
889 bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
890 void (*cbfn)(struct bnad *, struct bna_rx *))
892 struct bna_rxf *rxf = &rx->rxf;
893 struct bna_mac *mac;
895 /* Check if already added or pending addition */
896 if (bna_mac_find(&rxf->mcast_active_q, addr) ||
897 bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
898 if (cbfn)
899 cbfn(rx->bna->bnad, rx);
900 return BNA_CB_SUCCESS;
903 mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
904 if (mac == NULL)
905 return BNA_CB_MCAST_LIST_FULL;
906 bfa_q_qe_init(&mac->qe);
907 memcpy(mac->addr, addr, ETH_ALEN);
908 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
910 rxf->cam_fltr_cbfn = cbfn;
911 rxf->cam_fltr_cbarg = rx->bna->bnad;
913 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
915 return BNA_CB_SUCCESS;
918 enum bna_cb_status
919 bna_rx_ucast_listset(struct bna_rx *rx, int count, u8 *uclist,
920 void (*cbfn)(struct bnad *, struct bna_rx *))
922 struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
923 struct bna_rxf *rxf = &rx->rxf;
924 struct list_head list_head;
925 struct list_head *qe;
926 u8 *mcaddr;
927 struct bna_mac *mac, *del_mac;
928 int i;
930 /* Purge the pending_add_q */
931 while (!list_empty(&rxf->ucast_pending_add_q)) {
932 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
933 bfa_q_qe_init(qe);
934 mac = (struct bna_mac *)qe;
935 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
938 /* Schedule active_q entries for deletion */
939 while (!list_empty(&rxf->ucast_active_q)) {
940 bfa_q_deq(&rxf->ucast_active_q, &qe);
941 mac = (struct bna_mac *)qe;
942 bfa_q_qe_init(&mac->qe);
944 del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
945 memcpy(del_mac, mac, sizeof(*del_mac));
946 list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
947 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
950 /* Allocate nodes */
951 INIT_LIST_HEAD(&list_head);
952 for (i = 0, mcaddr = uclist; i < count; i++) {
953 mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
954 if (mac == NULL)
955 goto err_return;
956 bfa_q_qe_init(&mac->qe);
957 memcpy(mac->addr, mcaddr, ETH_ALEN);
958 list_add_tail(&mac->qe, &list_head);
959 mcaddr += ETH_ALEN;
962 /* Add the new entries */
963 while (!list_empty(&list_head)) {
964 bfa_q_deq(&list_head, &qe);
965 mac = (struct bna_mac *)qe;
966 bfa_q_qe_init(&mac->qe);
967 list_add_tail(&mac->qe, &rxf->ucast_pending_add_q);
970 rxf->cam_fltr_cbfn = cbfn;
971 rxf->cam_fltr_cbarg = rx->bna->bnad;
972 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
974 return BNA_CB_SUCCESS;
976 err_return:
977 while (!list_empty(&list_head)) {
978 bfa_q_deq(&list_head, &qe);
979 mac = (struct bna_mac *)qe;
980 bfa_q_qe_init(&mac->qe);
981 bna_cam_mod_mac_put(&ucam_mod->free_q, mac);
984 return BNA_CB_UCAST_CAM_FULL;
987 enum bna_cb_status
988 bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
989 void (*cbfn)(struct bnad *, struct bna_rx *))
991 struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
992 struct bna_rxf *rxf = &rx->rxf;
993 struct list_head list_head;
994 struct list_head *qe;
995 u8 *mcaddr;
996 struct bna_mac *mac, *del_mac;
997 int i;
999 /* Purge the pending_add_q */
1000 while (!list_empty(&rxf->mcast_pending_add_q)) {
1001 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1002 bfa_q_qe_init(qe);
1003 mac = (struct bna_mac *)qe;
1004 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1007 /* Schedule active_q entries for deletion */
1008 while (!list_empty(&rxf->mcast_active_q)) {
1009 bfa_q_deq(&rxf->mcast_active_q, &qe);
1010 mac = (struct bna_mac *)qe;
1011 bfa_q_qe_init(&mac->qe);
1013 del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
1015 memcpy(del_mac, mac, sizeof(*del_mac));
1016 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
1017 mac->handle = NULL;
1018 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1021 /* Allocate nodes */
1022 INIT_LIST_HEAD(&list_head);
1023 for (i = 0, mcaddr = mclist; i < count; i++) {
1024 mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
1025 if (mac == NULL)
1026 goto err_return;
1027 bfa_q_qe_init(&mac->qe);
1028 memcpy(mac->addr, mcaddr, ETH_ALEN);
1029 list_add_tail(&mac->qe, &list_head);
1031 mcaddr += ETH_ALEN;
1034 /* Add the new entries */
1035 while (!list_empty(&list_head)) {
1036 bfa_q_deq(&list_head, &qe);
1037 mac = (struct bna_mac *)qe;
1038 bfa_q_qe_init(&mac->qe);
1039 list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
1042 rxf->cam_fltr_cbfn = cbfn;
1043 rxf->cam_fltr_cbarg = rx->bna->bnad;
1044 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1046 return BNA_CB_SUCCESS;
1048 err_return:
1049 while (!list_empty(&list_head)) {
1050 bfa_q_deq(&list_head, &qe);
1051 mac = (struct bna_mac *)qe;
1052 bfa_q_qe_init(&mac->qe);
1053 bna_cam_mod_mac_put(&mcam_mod->free_q, mac);
1056 return BNA_CB_MCAST_LIST_FULL;
1059 void
1060 bna_rx_mcast_delall(struct bna_rx *rx,
1061 void (*cbfn)(struct bnad *, struct bna_rx *))
1063 struct bna_rxf *rxf = &rx->rxf;
1064 struct list_head *qe;
1065 struct bna_mac *mac, *del_mac;
1066 int need_hw_config = 0;
1068 /* Purge all entries from pending_add_q */
1069 while (!list_empty(&rxf->mcast_pending_add_q)) {
1070 bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
1071 mac = (struct bna_mac *)qe;
1072 bfa_q_qe_init(&mac->qe);
1073 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1076 /* Schedule all entries in active_q for deletion */
1077 while (!list_empty(&rxf->mcast_active_q)) {
1078 bfa_q_deq(&rxf->mcast_active_q, &qe);
1079 mac = (struct bna_mac *)qe;
1080 bfa_q_qe_init(&mac->qe);
1082 del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
1084 memcpy(del_mac, mac, sizeof(*del_mac));
1085 list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
1086 mac->handle = NULL;
1087 bna_cam_mod_mac_put(bna_mcam_mod_free_q(rxf->rx->bna), mac);
1088 need_hw_config = 1;
1091 if (need_hw_config) {
1092 rxf->cam_fltr_cbfn = cbfn;
1093 rxf->cam_fltr_cbarg = rx->bna->bnad;
1094 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1095 return;
1098 if (cbfn)
1099 (*cbfn)(rx->bna->bnad, rx);
1102 void
1103 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
1105 struct bna_rxf *rxf = &rx->rxf;
1106 int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1107 int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
1108 int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1110 rxf->vlan_filter_table[index] |= bit;
1111 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1112 rxf->vlan_pending_bitmask |= (1 << group_id);
1113 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
1117 void
1118 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
1120 struct bna_rxf *rxf = &rx->rxf;
1121 int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
1122 int bit = (1 << (vlan_id & BFI_VLAN_WORD_MASK));
1123 int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
1125 rxf->vlan_filter_table[index] &= ~bit;
1126 if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
1127 rxf->vlan_pending_bitmask |= (1 << group_id);
1128 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
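/*
 * Unicast configuration is applied in three steps, one firmware
 * request per call: pending deletes first, then the pending "default"
 * unicast MAC (ucast_pending_set), then any additional pending MAC
 * entries.
 */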
1132 static int
1133 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
1135 struct bna_mac *mac = NULL;
1136 struct list_head *qe;
1138 /* Delete MAC addresses previously added */
1139 if (!list_empty(&rxf->ucast_pending_del_q)) {
1140 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1141 bfa_q_qe_init(qe);
1142 mac = (struct bna_mac *)qe;
1143 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1144 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna), mac);
1145 return 1;
1148 /* Set default unicast MAC */
1149 if (rxf->ucast_pending_set) {
1150 rxf->ucast_pending_set = 0;
1151 memcpy(rxf->ucast_active_mac.addr,
1152 rxf->ucast_pending_mac->addr, ETH_ALEN);
1153 rxf->ucast_active_set = 1;
1154 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1155 BFI_ENET_H2I_MAC_UCAST_SET_REQ);
1156 return 1;
1159 /* Add additional MAC entries */
1160 if (!list_empty(&rxf->ucast_pending_add_q)) {
1161 bfa_q_deq(&rxf->ucast_pending_add_q, &qe);
1162 bfa_q_qe_init(qe);
1163 mac = (struct bna_mac *)qe;
1164 list_add_tail(&mac->qe, &rxf->ucast_active_q);
1165 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
1166 return 1;
1169 return 0;
1172 static int
1173 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1175 struct list_head *qe;
1176 struct bna_mac *mac;
1178 /* Throw away ucast entries pending deletion */
1179 while (!list_empty(&rxf->ucast_pending_del_q)) {
1180 bfa_q_deq(&rxf->ucast_pending_del_q, &qe);
1181 bfa_q_qe_init(qe);
1182 mac = (struct bna_mac *)qe;
1183 if (cleanup == BNA_SOFT_CLEANUP)
1184 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1185 mac);
1186 else {
1187 bna_bfi_ucast_req(rxf, mac,
1188 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1189 bna_cam_mod_mac_put(bna_ucam_mod_del_q(rxf->rx->bna),
1190 mac);
1191 return 1;
1195 /* Move active ucast entries to pending_add_q */
1196 while (!list_empty(&rxf->ucast_active_q)) {
1197 bfa_q_deq(&rxf->ucast_active_q, &qe);
1198 bfa_q_qe_init(qe);
1199 list_add_tail(qe, &rxf->ucast_pending_add_q);
1200 if (cleanup == BNA_HARD_CLEANUP) {
1201 mac = (struct bna_mac *)qe;
1202 bna_bfi_ucast_req(rxf, mac,
1203 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1204 return 1;
1208 if (rxf->ucast_active_set) {
1209 rxf->ucast_pending_set = 1;
1210 rxf->ucast_active_set = 0;
1211 if (cleanup == BNA_HARD_CLEANUP) {
1212 bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1213 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1214 return 1;
1218 return 0;
1221 static int
1222 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1224 struct bna *bna = rxf->rx->bna;
1226 /* Enable/disable promiscuous mode */
1227 if (is_promisc_enable(rxf->rxmode_pending,
1228 rxf->rxmode_pending_bitmask)) {
1229 /* move promisc configuration from pending -> active */
1230 promisc_inactive(rxf->rxmode_pending,
1231 rxf->rxmode_pending_bitmask);
1232 rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1233 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1234 return 1;
1235 } else if (is_promisc_disable(rxf->rxmode_pending,
1236 rxf->rxmode_pending_bitmask)) {
1237 /* move promisc configuration from pending -> active */
1238 promisc_inactive(rxf->rxmode_pending,
1239 rxf->rxmode_pending_bitmask);
1240 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1241 bna->promisc_rid = BFI_INVALID_RID;
1242 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1243 return 1;
1246 return 0;
1249 static int
1250 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1252 struct bna *bna = rxf->rx->bna;
1254 /* Clear pending promisc mode disable */
1255 if (is_promisc_disable(rxf->rxmode_pending,
1256 rxf->rxmode_pending_bitmask)) {
1257 promisc_inactive(rxf->rxmode_pending,
1258 rxf->rxmode_pending_bitmask);
1259 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1260 bna->promisc_rid = BFI_INVALID_RID;
1261 if (cleanup == BNA_HARD_CLEANUP) {
1262 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1263 return 1;
1267 /* Move promisc mode config from active -> pending */
1268 if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1269 promisc_enable(rxf->rxmode_pending,
1270 rxf->rxmode_pending_bitmask);
1271 rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1272 if (cleanup == BNA_HARD_CLEANUP) {
1273 bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1274 return 1;
1278 return 0;
1281 static int
1282 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1284 /* Enable/disable allmulti mode */
1285 if (is_allmulti_enable(rxf->rxmode_pending,
1286 rxf->rxmode_pending_bitmask)) {
1287 /* move allmulti configuration from pending -> active */
1288 allmulti_inactive(rxf->rxmode_pending,
1289 rxf->rxmode_pending_bitmask);
1290 rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1291 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1292 return 1;
1293 } else if (is_allmulti_disable(rxf->rxmode_pending,
1294 rxf->rxmode_pending_bitmask)) {
1295 /* move allmulti configuration from pending -> active */
1296 allmulti_inactive(rxf->rxmode_pending,
1297 rxf->rxmode_pending_bitmask);
1298 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1299 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1300 return 1;
1303 return 0;
1306 static int
1307 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1309 /* Clear pending allmulti mode disable */
1310 if (is_allmulti_disable(rxf->rxmode_pending,
1311 rxf->rxmode_pending_bitmask)) {
1312 allmulti_inactive(rxf->rxmode_pending,
1313 rxf->rxmode_pending_bitmask);
1314 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1315 if (cleanup == BNA_HARD_CLEANUP) {
1316 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1317 return 1;
1321 /* Move allmulti mode config from active -> pending */
1322 if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1323 allmulti_enable(rxf->rxmode_pending,
1324 rxf->rxmode_pending_bitmask);
1325 rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1326 if (cleanup == BNA_HARD_CLEANUP) {
1327 bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1328 return 1;
1332 return 0;
1335 static int
1336 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1338 struct bna *bna = rxf->rx->bna;
1339 int ret = 0;
1341 if (is_promisc_enable(rxf->rxmode_pending,
1342 rxf->rxmode_pending_bitmask) ||
1343 (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1344 /* Do nothing if pending enable or already enabled */
1345 } else if (is_promisc_disable(rxf->rxmode_pending,
1346 rxf->rxmode_pending_bitmask)) {
1347 /* Turn off pending disable command */
1348 promisc_inactive(rxf->rxmode_pending,
1349 rxf->rxmode_pending_bitmask);
1350 } else {
1351 /* Schedule enable */
1352 promisc_enable(rxf->rxmode_pending,
1353 rxf->rxmode_pending_bitmask);
1354 bna->promisc_rid = rxf->rx->rid;
1355 ret = 1;
1358 return ret;
1361 static int
1362 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1364 struct bna *bna = rxf->rx->bna;
1365 int ret = 0;
1367 if (is_promisc_disable(rxf->rxmode_pending,
1368 rxf->rxmode_pending_bitmask) ||
1369 (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1370 /* Do nothing if pending disable or already disabled */
1371 } else if (is_promisc_enable(rxf->rxmode_pending,
1372 rxf->rxmode_pending_bitmask)) {
1373 /* Turn off pending enable command */
1374 promisc_inactive(rxf->rxmode_pending,
1375 rxf->rxmode_pending_bitmask);
1376 bna->promisc_rid = BFI_INVALID_RID;
1377 } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1378 /* Schedule disable */
1379 promisc_disable(rxf->rxmode_pending,
1380 rxf->rxmode_pending_bitmask);
1381 ret = 1;
1384 return ret;
1387 static int
1388 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1390 int ret = 0;
1392 if (is_allmulti_enable(rxf->rxmode_pending,
1393 rxf->rxmode_pending_bitmask) ||
1394 (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1395 /* Do nothing if pending enable or already enabled */
1396 } else if (is_allmulti_disable(rxf->rxmode_pending,
1397 rxf->rxmode_pending_bitmask)) {
1398 /* Turn off pending disable command */
1399 allmulti_inactive(rxf->rxmode_pending,
1400 rxf->rxmode_pending_bitmask);
1401 } else {
1402 /* Schedule enable */
1403 allmulti_enable(rxf->rxmode_pending,
1404 rxf->rxmode_pending_bitmask);
1405 ret = 1;
1408 return ret;
1411 static int
1412 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1414 int ret = 0;
1416 if (is_allmulti_disable(rxf->rxmode_pending,
1417 rxf->rxmode_pending_bitmask) ||
1418 (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1419 /* Do nothing if pending disable or already disabled */
1420 } else if (is_allmulti_enable(rxf->rxmode_pending,
1421 rxf->rxmode_pending_bitmask)) {
1422 /* Turn off pending enable command */
1423 allmulti_inactive(rxf->rxmode_pending,
1424 rxf->rxmode_pending_bitmask);
1425 } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1426 /* Schedule disable */
1427 allmulti_disable(rxf->rxmode_pending,
1428 rxf->rxmode_pending_bitmask);
1429 ret = 1;
1432 return ret;
1435 static int
1436 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1438 if (rxf->vlan_strip_pending) {
1439 rxf->vlan_strip_pending = false;
1440 bna_bfi_vlan_strip_enable(rxf);
1441 return 1;
1444 return 0;
1447 /* RX */
1449 #define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1450 (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1452 #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1453 (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1455 #define call_rx_stop_cbfn(rx) \
1456 do { \
1457 if ((rx)->stop_cbfn) { \
1458 void (*cbfn)(void *, struct bna_rx *); \
1459 void *cbarg; \
1460 cbfn = (rx)->stop_cbfn; \
1461 cbarg = (rx)->stop_cbarg; \
1462 (rx)->stop_cbfn = NULL; \
1463 (rx)->stop_cbarg = NULL; \
1464 cbfn(cbarg, rx); \
1466 } while (0)
1468 #define call_rx_stall_cbfn(rx) \
1469 do { \
1470 if ((rx)->rx_stall_cbfn) \
1471 (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \
1472 } while (0)
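/*
 * bfi_enet_datapath_q_init() copies a queue's page-table DMA address,
 * first-entry address, page count and page size from the driver-side
 * qpt into the BFI queue descriptor that is sent to firmware.
 */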
1474 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \
1475 do { \
1476 struct bna_dma_addr cur_q_addr = \
1477 *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \
1478 (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \
1479 (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \
1480 (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \
1481 (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \
1482 (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1483 (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1484 } while (0)
1486 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1487 static void bna_rx_enet_stop(struct bna_rx *rx);
1488 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
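/*
 * RX state machine.  A start walks through start_wait (enet start
 * request) and rxf_start_wait (RXF bring-up) to started; a stop tears
 * down through rxf_stop_wait and stop_wait, and cleanup_wait holds the
 * RX until the driver-side cleanup callback completes.  failed and
 * quiesce_wait handle restarts after a failure.
 */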
1490 bfa_fsm_state_decl(bna_rx, stopped,
1491 struct bna_rx, enum bna_rx_event);
1492 bfa_fsm_state_decl(bna_rx, start_wait,
1493 struct bna_rx, enum bna_rx_event);
1494 bfa_fsm_state_decl(bna_rx, start_stop_wait,
1495 struct bna_rx, enum bna_rx_event);
1496 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1497 struct bna_rx, enum bna_rx_event);
1498 bfa_fsm_state_decl(bna_rx, started,
1499 struct bna_rx, enum bna_rx_event);
1500 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1501 struct bna_rx, enum bna_rx_event);
1502 bfa_fsm_state_decl(bna_rx, stop_wait,
1503 struct bna_rx, enum bna_rx_event);
1504 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1505 struct bna_rx, enum bna_rx_event);
1506 bfa_fsm_state_decl(bna_rx, failed,
1507 struct bna_rx, enum bna_rx_event);
1508 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1509 struct bna_rx, enum bna_rx_event);
1511 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1513 call_rx_stop_cbfn(rx);
1516 static void bna_rx_sm_stopped(struct bna_rx *rx,
1517 enum bna_rx_event event)
1519 switch (event) {
1520 case RX_E_START:
1521 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1522 break;
1524 case RX_E_STOP:
1525 call_rx_stop_cbfn(rx);
1526 break;
1528 case RX_E_FAIL:
1529 /* no-op */
1530 break;
1532 default:
1533 bfa_sm_fault(event);
1534 break;
1538 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1540 bna_bfi_rx_enet_start(rx);
1543 static void
1544 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1548 static void
1549 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1551 switch (event) {
1552 case RX_E_FAIL:
1553 case RX_E_STOPPED:
1554 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1555 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1556 break;
1558 case RX_E_STARTED:
1559 bna_rx_enet_stop(rx);
1560 break;
1562 default:
1563 bfa_sm_fault(event);
1564 break;
1568 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1569 enum bna_rx_event event)
1571 switch (event) {
1572 case RX_E_STOP:
1573 bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1574 break;
1576 case RX_E_FAIL:
1577 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1578 break;
1580 case RX_E_STARTED:
1581 bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1582 break;
1584 default:
1585 bfa_sm_fault(event);
1586 break;
1590 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1592 rx->rx_post_cbfn(rx->bna->bnad, rx);
1593 bna_rxf_start(&rx->rxf);
1596 static void
1597 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1601 static void
1602 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1604 switch (event) {
1605 case RX_E_FAIL:
1606 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1607 bna_rxf_fail(&rx->rxf);
1608 call_rx_stall_cbfn(rx);
1609 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1610 break;
1612 case RX_E_RXF_STARTED:
1613 bna_rxf_stop(&rx->rxf);
1614 break;
1616 case RX_E_RXF_STOPPED:
1617 bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1618 call_rx_stall_cbfn(rx);
1619 bna_rx_enet_stop(rx);
1620 break;
1622 default:
1623 bfa_sm_fault(event);
1624 break;
1629 static void
1630 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1634 static void
1635 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1637 switch (event) {
1638 case RX_E_FAIL:
1639 case RX_E_STOPPED:
1640 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1641 break;
1643 case RX_E_STARTED:
1644 bna_rx_enet_stop(rx);
1645 break;
1647 default:
1648 bfa_sm_fault(event);
1652 static void
1653 bna_rx_sm_started_entry(struct bna_rx *rx)
1655 struct bna_rxp *rxp;
1656 struct list_head *qe_rxp;
1657 int is_regular = (rx->type == BNA_RX_T_REGULAR);
1659 /* Start IB */
1660 list_for_each(qe_rxp, &rx->rxp_q) {
1661 rxp = (struct bna_rxp *)qe_rxp;
1662 bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1665 bna_ethport_cb_rx_started(&rx->bna->ethport);
1668 static void
1669 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1671 switch (event) {
1672 case RX_E_STOP:
1673 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1674 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1675 bna_rxf_stop(&rx->rxf);
1676 break;
1678 case RX_E_FAIL:
1679 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1680 bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1681 bna_rxf_fail(&rx->rxf);
1682 call_rx_stall_cbfn(rx);
1683 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1684 break;
1686 default:
1687 bfa_sm_fault(event);
1688 break;
1692 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1693 enum bna_rx_event event)
1695 switch (event) {
1696 case RX_E_STOP:
1697 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1698 break;
1700 case RX_E_FAIL:
1701 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1702 bna_rxf_fail(&rx->rxf);
1703 call_rx_stall_cbfn(rx);
1704 rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1705 break;
1707 case RX_E_RXF_STARTED:
1708 bfa_fsm_set_state(rx, bna_rx_sm_started);
1709 break;
1711 default:
1712 bfa_sm_fault(event);
1713 break;
1717 static void
1718 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1722 static void
1723 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1725 switch (event) {
1726 case RX_E_FAIL:
1727 case RX_E_RXF_STOPPED:
1728 /* No-op */
1729 break;
1731 case RX_E_CLEANUP_DONE:
1732 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1733 break;
1735 default:
1736 bfa_sm_fault(event);
1737 break;
1741 static void
1742 bna_rx_sm_failed_entry(struct bna_rx *rx)
1746 static void
1747 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1749 switch (event) {
1750 case RX_E_START:
1751 bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1752 break;
1754 case RX_E_STOP:
1755 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1756 break;
1758 case RX_E_FAIL:
1759 case RX_E_RXF_STARTED:
1760 case RX_E_RXF_STOPPED:
1761 /* No-op */
1762 break;
1764 case RX_E_CLEANUP_DONE:
1765 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1766 break;
1768 default:
1769 bfa_sm_fault(event);
1770 break;
1773 static void
1774 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1778 static void
1779 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1781 switch (event) {
1782 case RX_E_STOP:
1783 bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1784 break;
1786 case RX_E_FAIL:
1787 bfa_fsm_set_state(rx, bna_rx_sm_failed);
1788 break;
1790 case RX_E_CLEANUP_DONE:
1791 bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1792 break;
1794 default:
1795 bfa_sm_fault(event);
1796 break;
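/*
 * bna_bfi_rx_enet_start() assembles the RX_CFG_SET request for
 * firmware: per-path queue sets (large/small or header/data RxQs plus
 * the CQ and its IB index address and MSI-X vector), interrupt
 * coalescing parameters, the rxq type (single/SLR/HDS), HDS settings
 * where applicable, and the VLAN strip status.
 */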
1800 static void
1801 bna_bfi_rx_enet_start(struct bna_rx *rx)
1803 struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1804 struct bna_rxp *rxp = NULL;
1805 struct bna_rxq *q0 = NULL, *q1 = NULL;
1806 struct list_head *rxp_qe;
1807 int i;
1809 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1810 BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1811 cfg_req->mh.num_entries = htons(
1812 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1814 cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
1815 cfg_req->num_queue_sets = rx->num_paths;
1816 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
1817 i < rx->num_paths;
1818 i++, rxp_qe = bfa_q_next(rxp_qe)) {
1819 rxp = (struct bna_rxp *)rxp_qe;
1821 GET_RXQS(rxp, q0, q1);
1822 switch (rxp->type) {
1823 case BNA_RXP_SLR:
1824 case BNA_RXP_HDS:
1825 /* Small RxQ */
1826 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1827 &q1->qpt);
1828 cfg_req->q_cfg[i].qs.rx_buffer_size =
1829 htons((u16)q1->buffer_size);
1830 /* Fall through */
1832 case BNA_RXP_SINGLE:
1833 /* Large/Single RxQ */
1834 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1835 &q0->qpt);
1836 if (q0->multi_buffer)
1837 /* multi-buffer is enabled by allocating
1838 * a new rx with a new set of resources.
1839 * q0->buffer_size should be initialized to
1840 * the fragment size.
1841 */
1842 cfg_req->rx_cfg.multi_buffer =
1843 BNA_STATUS_T_ENABLED;
1844 else
1845 q0->buffer_size =
1846 bna_enet_mtu_get(&rx->bna->enet);
1847 cfg_req->q_cfg[i].ql.rx_buffer_size =
1848 htons((u16)q0->buffer_size);
1849 break;
1851 default:
1852 BUG_ON(1);
1855 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1856 &rxp->cq.qpt);
1858 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1859 rxp->cq.ib.ib_seg_host_addr.lsb;
1860 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1861 rxp->cq.ib.ib_seg_host_addr.msb;
1862 cfg_req->q_cfg[i].ib.intr.msix_index =
1863 htons((u16)rxp->cq.ib.intr_vector);
1866 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1867 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1868 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1869 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1870 cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1871 ? BNA_STATUS_T_ENABLED :
1872 BNA_STATUS_T_DISABLED;
1873 cfg_req->ib_cfg.coalescing_timeout =
1874 htonl((u32)rxp->cq.ib.coalescing_timeo);
1875 cfg_req->ib_cfg.inter_pkt_timeout =
1876 htonl((u32)rxp->cq.ib.interpkt_timeo);
1877 cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1879 switch (rxp->type) {
1880 case BNA_RXP_SLR:
1881 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1882 break;
1884 case BNA_RXP_HDS:
1885 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1886 cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1887 cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1888 cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1889 break;
1891 case BNA_RXP_SINGLE:
1892 cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1893 break;
1895 default:
1896 BUG_ON(1);
1898 cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1900 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1901 sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1902 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1905 static void
1906 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1908 struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1910 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1911 BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1912 req->mh.num_entries = htons(
1913 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1914 bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1915 &req->mh);
1916 bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1919 static void
1920 bna_rx_enet_stop(struct bna_rx *rx)
1922 struct bna_rxp *rxp;
1923 struct list_head *qe_rxp;
1925 /* Stop IB */
1926 list_for_each(qe_rxp, &rx->rxp_q) {
1927 rxp = (struct bna_rxp *)qe_rxp;
1928 bna_ib_stop(rx->bna, &rxp->cq.ib);
1931 bna_bfi_rx_enet_stop(rx);
1934 static int
1935 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1937 if ((rx_mod->rx_free_count == 0) ||
1938 (rx_mod->rxp_free_count == 0) ||
1939 (rx_mod->rxq_free_count == 0))
1940 return 0;
1942 if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1943 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1944 (rx_mod->rxq_free_count < rx_cfg->num_paths))
1945 return 0;
1946 } else {
1947 if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1948 (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1949 return 0;
1952 return 1;
1955 static struct bna_rxq *
1956 bna_rxq_get(struct bna_rx_mod *rx_mod)
1958 struct bna_rxq *rxq = NULL;
1959 struct list_head *qe = NULL;
1961 bfa_q_deq(&rx_mod->rxq_free_q, &qe);
1962 rx_mod->rxq_free_count--;
1963 rxq = (struct bna_rxq *)qe;
1964 bfa_q_qe_init(&rxq->qe);
1966 return rxq;
1969 static void
1970 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1972 bfa_q_qe_init(&rxq->qe);
1973 list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1974 rx_mod->rxq_free_count++;
1977 static struct bna_rxp *
1978 bna_rxp_get(struct bna_rx_mod *rx_mod)
1980 struct list_head *qe = NULL;
1981 struct bna_rxp *rxp = NULL;
1983 bfa_q_deq(&rx_mod->rxp_free_q, &qe);
1984 rx_mod->rxp_free_count--;
1985 rxp = (struct bna_rxp *)qe;
1986 bfa_q_qe_init(&rxp->qe);
1988 return rxp;
1991 static void
1992 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1994 bfa_q_qe_init(&rxp->qe);
1995 list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1996 rx_mod->rxp_free_count++;
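/*
 * bna_rx_get() takes regular RX objects from the head of rx_free_q and
 * loopback RX objects from the tail; bna_rx_put() re-inserts an RX in
 * ascending rid order, which keeps the lowest rids at the head for
 * regular use.
 */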
1999 static struct bna_rx *
2000 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2002 struct list_head *qe = NULL;
2003 struct bna_rx *rx = NULL;
2005 if (type == BNA_RX_T_REGULAR) {
2006 bfa_q_deq(&rx_mod->rx_free_q, &qe);
2007 } else
2008 bfa_q_deq_tail(&rx_mod->rx_free_q, &qe);
2010 rx_mod->rx_free_count--;
2011 rx = (struct bna_rx *)qe;
2012 bfa_q_qe_init(&rx->qe);
2013 list_add_tail(&rx->qe, &rx_mod->rx_active_q);
2014 rx->type = type;
2016 return rx;
2019 static void
2020 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
2022 struct list_head *prev_qe = NULL;
2023 struct list_head *qe;
2025 bfa_q_qe_init(&rx->qe);
2027 list_for_each(qe, &rx_mod->rx_free_q) {
2028 if (((struct bna_rx *)qe)->rid < rx->rid)
2029 prev_qe = qe;
2030 else
2031 break;
2034 if (prev_qe == NULL) {
2035 /* This is the first entry */
2036 bfa_q_enq_head(&rx_mod->rx_free_q, &rx->qe);
2037 } else if (bfa_q_next(prev_qe) == &rx_mod->rx_free_q) {
2038 /* This is the last entry */
2039 list_add_tail(&rx->qe, &rx_mod->rx_free_q);
2040 } else {
2041 /* Somewhere in the middle */
2042 bfa_q_next(&rx->qe) = bfa_q_next(prev_qe);
2043 bfa_q_prev(&rx->qe) = prev_qe;
2044 bfa_q_next(prev_qe) = &rx->qe;
2045 bfa_q_prev(bfa_q_next(&rx->qe)) = &rx->qe;
2048 rx_mod->rx_free_count++;
2051 static void
2052 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
2053 struct bna_rxq *q1)
2055 switch (rxp->type) {
2056 case BNA_RXP_SINGLE:
2057 rxp->rxq.single.only = q0;
2058 rxp->rxq.single.reserved = NULL;
2059 break;
2060 case BNA_RXP_SLR:
2061 rxp->rxq.slr.large = q0;
2062 rxp->rxq.slr.small = q1;
2063 break;
2064 case BNA_RXP_HDS:
2065 rxp->rxq.hds.data = q0;
2066 rxp->rxq.hds.hdr = q1;
2067 break;
2068 default:
2069 break;
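/*
 * The *_qpt_setup() helpers below populate the hardware queue page
 * table with the DMA address of each PAGE_SIZE chunk of queue memory,
 * and mirror the corresponding kernel virtual addresses in sw_qpt for
 * driver-side access.
 */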
2073 static void
2074 bna_rxq_qpt_setup(struct bna_rxq *rxq,
2075 struct bna_rxp *rxp,
2076 u32 page_count,
2077 u32 page_size,
2078 struct bna_mem_descr *qpt_mem,
2079 struct bna_mem_descr *swqpt_mem,
2080 struct bna_mem_descr *page_mem)
2082 u8 *kva;
2083 u64 dma;
2084 struct bna_dma_addr bna_dma;
2085 int i;
2087 rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2088 rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2089 rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
2090 rxq->qpt.page_count = page_count;
2091 rxq->qpt.page_size = page_size;
2093 rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
2094 rxq->rcb->sw_q = page_mem->kva;
2096 kva = page_mem->kva;
2097 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2099 for (i = 0; i < rxq->qpt.page_count; i++) {
2100 rxq->rcb->sw_qpt[i] = kva;
2101 kva += PAGE_SIZE;
2103 BNA_SET_DMA_ADDR(dma, &bna_dma);
2104 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
2105 bna_dma.lsb;
2106 ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
2107 bna_dma.msb;
2108 dma += PAGE_SIZE;
2112 static void
2113 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
2114 u32 page_count,
2115 u32 page_size,
2116 struct bna_mem_descr *qpt_mem,
2117 struct bna_mem_descr *swqpt_mem,
2118 struct bna_mem_descr *page_mem)
2120 u8 *kva;
2121 u64 dma;
2122 struct bna_dma_addr bna_dma;
2123 int i;
2125 rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
2126 rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
2127 rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
2128 rxp->cq.qpt.page_count = page_count;
2129 rxp->cq.qpt.page_size = page_size;
2131 rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
2132 rxp->cq.ccb->sw_q = page_mem->kva;
2134 kva = page_mem->kva;
2135 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
2137 for (i = 0; i < rxp->cq.qpt.page_count; i++) {
2138 rxp->cq.ccb->sw_qpt[i] = kva;
2139 kva += PAGE_SIZE;
2141 BNA_SET_DMA_ADDR(dma, &bna_dma);
2142 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
2143 bna_dma.lsb;
2144 ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
2145 bna_dma.msb;
2146 dma += PAGE_SIZE;
2150 static void
2151 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
2153 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2155 bfa_wc_down(&rx_mod->rx_stop_wc);
2158 static void
2159 bna_rx_mod_cb_rx_stopped_all(void *arg)
2161 struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
2163 if (rx_mod->stop_cbfn)
2164 rx_mod->stop_cbfn(&rx_mod->bna->enet);
2165 rx_mod->stop_cbfn = NULL;
2168 static void
2169 bna_rx_start(struct bna_rx *rx)
2171 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2172 if (rx->rx_flags & BNA_RX_F_ENABLED)
2173 bfa_fsm_send_event(rx, RX_E_START);
2176 static void
2177 bna_rx_stop(struct bna_rx *rx)
2179 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2180 if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
2181 bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
2182 else {
2183 rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
2184 rx->stop_cbarg = &rx->bna->rx_mod;
2185 bfa_fsm_send_event(rx, RX_E_STOP);
2189 static void
2190 bna_rx_fail(struct bna_rx *rx)
2192 /* Indicate Enet is no longer started, and fail the Rx */
2193 rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
2194 bfa_fsm_send_event(rx, RX_E_FAIL);
2197 void
2198 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2200 struct bna_rx *rx;
2201 struct list_head *qe;
2203 rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
2204 if (type == BNA_RX_T_LOOPBACK)
2205 rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
2207 list_for_each(qe, &rx_mod->rx_active_q) {
2208 rx = (struct bna_rx *)qe;
2209 if (rx->type == type)
2210 bna_rx_start(rx);
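/*
 * Stopping uses the rx_stop_wc waiting counter: one bfa_wc_up() per
 * matching Rx, with bna_rx_mod_cb_rx_stopped() counting each one back
 * down, so bna_rx_mod_cb_rx_stopped_all() (and hence stop_cbfn) runs
 * only after every Rx of the requested type has actually stopped.
 */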
2214 void
2215 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
2217 struct bna_rx *rx;
2218 struct list_head *qe;
2220 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2221 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2223 rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
2225 bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2227 list_for_each(qe, &rx_mod->rx_active_q) {
2228 rx = (struct bna_rx *)qe;
2229 if (rx->type == type) {
2230 bfa_wc_up(&rx_mod->rx_stop_wc);
2231 bna_rx_stop(rx);
2235 bfa_wc_wait(&rx_mod->rx_stop_wc);
2238 void
2239 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2241 struct bna_rx *rx;
2242 struct list_head *qe;
2244 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2245 rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2247 list_for_each(qe, &rx_mod->rx_active_q) {
2248 rx = (struct bna_rx *)qe;
2249 bna_rx_fail(rx);
2253 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2254 struct bna_res_info *res_info)
2256 int index;
2257 struct bna_rx *rx_ptr;
2258 struct bna_rxp *rxp_ptr;
2259 struct bna_rxq *rxq_ptr;
2261 rx_mod->bna = bna;
2262 rx_mod->flags = 0;
2264 rx_mod->rx = (struct bna_rx *)
2265 res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2266 rx_mod->rxp = (struct bna_rxp *)
2267 res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2268 rx_mod->rxq = (struct bna_rxq *)
2269 res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2271 /* Initialize the queues */
2272 INIT_LIST_HEAD(&rx_mod->rx_free_q);
2273 rx_mod->rx_free_count = 0;
2274 INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2275 rx_mod->rxq_free_count = 0;
2276 INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2277 rx_mod->rxp_free_count = 0;
2278 INIT_LIST_HEAD(&rx_mod->rx_active_q);
2280 /* Build RX queues */
2281 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2282 rx_ptr = &rx_mod->rx[index];
2284 bfa_q_qe_init(&rx_ptr->qe);
2285 INIT_LIST_HEAD(&rx_ptr->rxp_q);
2286 rx_ptr->bna = NULL;
2287 rx_ptr->rid = index;
2288 rx_ptr->stop_cbfn = NULL;
2289 rx_ptr->stop_cbarg = NULL;
2291 list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2292 rx_mod->rx_free_count++;
2295 /* Build RX-path queue */
2296 for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2297 rxp_ptr = &rx_mod->rxp[index];
2298 bfa_q_qe_init(&rxp_ptr->qe);
2299 list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2300 rx_mod->rxp_free_count++;
2303 /* Build RXQ queue */
2304 for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2305 rxq_ptr = &rx_mod->rxq[index];
2306 bfa_q_qe_init(&rxq_ptr->qe);
2307 list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2308 rx_mod->rxq_free_count++;
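/*
 * Uninit only walks the free lists (the counts computed below are never
 * used and appear to be leftover sanity/debug code) and clears the back
 * pointer to the bna instance.
 */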
2312 void
2313 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2315 struct list_head *qe;
2316 int i;
2318 i = 0;
2319 list_for_each(qe, &rx_mod->rx_free_q)
2320 i++;
2322 i = 0;
2323 list_for_each(qe, &rx_mod->rxp_free_q)
2324 i++;
2326 i = 0;
2327 list_for_each(qe, &rx_mod->rxq_free_q)
2328 i++;
2330 rx_mod->bna = NULL;
2333 void
2334 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2336 struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2337 struct bna_rxp *rxp = NULL;
2338 struct bna_rxq *q0 = NULL, *q1 = NULL;
2339 struct list_head *rxp_qe;
2340 int i;
2342 bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2343 sizeof(struct bfi_enet_rx_cfg_rsp));
2345 rx->hw_id = cfg_rsp->hw_id;
2347 for (i = 0, rxp_qe = bfa_q_first(&rx->rxp_q);
2348 i < rx->num_paths;
2349 i++, rxp_qe = bfa_q_next(rxp_qe)) {
2350 rxp = (struct bna_rxp *)rxp_qe;
2351 GET_RXQS(rxp, q0, q1);
2353 /* Setup doorbells */
2354 rxp->cq.ccb->i_dbell->doorbell_addr =
2355 rx->bna->pcidev.pci_bar_kva
2356 + ntohl(cfg_rsp->q_handles[i].i_dbell);
2357 rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2358 q0->rcb->q_dbell =
2359 rx->bna->pcidev.pci_bar_kva
2360 + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2361 q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2362 if (q1) {
2363 q1->rcb->q_dbell =
2364 rx->bna->pcidev.pci_bar_kva
2365 + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2366 q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2369 /* Initialize producer/consumer indexes */
2370 (*rxp->cq.ccb->hw_producer_index) = 0;
2371 rxp->cq.ccb->producer_index = 0;
2372 q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2373 if (q1)
2374 q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2377 bfa_fsm_send_event(rx, RX_E_STARTED);
2380 void
2381 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2383 bfa_fsm_send_event(rx, RX_E_STOPPED);
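/*
 * Compute the per-path memory and interrupt requirements for an Rx:
 * data-queue, header-queue and completion-queue depths are rounded up to
 * powers of two, scaled by their work-item sizes and page-aligned; the
 * resulting page counts drive the QPT/SWQPT/page entries filled into
 * res_info[].  For BNA_RXP_SINGLE the header-queue entries get num = 0.
 */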
2386 void
2387 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2389 u32 cq_size, hq_size, dq_size;
2390 u32 cpage_count, hpage_count, dpage_count;
2391 struct bna_mem_info *mem_info;
2392 u32 cq_depth;
2393 u32 hq_depth;
2394 u32 dq_depth;
2396 dq_depth = q_cfg->q0_depth;
2397 hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2398 cq_depth = dq_depth + hq_depth;
2400 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2401 cq_size = cq_depth * BFI_CQ_WI_SIZE;
2402 cq_size = ALIGN(cq_size, PAGE_SIZE);
2403 cpage_count = SIZE_TO_PAGES(cq_size);
2405 BNA_TO_POWER_OF_2_HIGH(dq_depth);
2406 dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2407 dq_size = ALIGN(dq_size, PAGE_SIZE);
2408 dpage_count = SIZE_TO_PAGES(dq_size);
2410 if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2411 BNA_TO_POWER_OF_2_HIGH(hq_depth);
2412 hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2413 hq_size = ALIGN(hq_size, PAGE_SIZE);
2414 hpage_count = SIZE_TO_PAGES(hq_size);
2415 } else
2416 hpage_count = 0;
2418 res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2419 mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2420 mem_info->mem_type = BNA_MEM_T_KVA;
2421 mem_info->len = sizeof(struct bna_ccb);
2422 mem_info->num = q_cfg->num_paths;
2424 res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2425 mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2426 mem_info->mem_type = BNA_MEM_T_KVA;
2427 mem_info->len = sizeof(struct bna_rcb);
2428 mem_info->num = BNA_GET_RXQS(q_cfg);
2430 res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2431 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2432 mem_info->mem_type = BNA_MEM_T_DMA;
2433 mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2434 mem_info->num = q_cfg->num_paths;
2436 res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2437 mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2438 mem_info->mem_type = BNA_MEM_T_KVA;
2439 mem_info->len = cpage_count * sizeof(void *);
2440 mem_info->num = q_cfg->num_paths;
2442 res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2443 mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2444 mem_info->mem_type = BNA_MEM_T_DMA;
2445 mem_info->len = PAGE_SIZE * cpage_count;
2446 mem_info->num = q_cfg->num_paths;
2448 res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2449 mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2450 mem_info->mem_type = BNA_MEM_T_DMA;
2451 mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2452 mem_info->num = q_cfg->num_paths;
2454 res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2455 mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2456 mem_info->mem_type = BNA_MEM_T_KVA;
2457 mem_info->len = dpage_count * sizeof(void *);
2458 mem_info->num = q_cfg->num_paths;
2460 res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2461 mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2462 mem_info->mem_type = BNA_MEM_T_DMA;
2463 mem_info->len = PAGE_SIZE * dpage_count;
2464 mem_info->num = q_cfg->num_paths;
2466 res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2467 mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2468 mem_info->mem_type = BNA_MEM_T_DMA;
2469 mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2470 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2472 res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2473 mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2474 mem_info->mem_type = BNA_MEM_T_KVA;
2475 mem_info->len = hpage_count * sizeof(void *);
2476 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2478 res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2479 mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2480 mem_info->mem_type = BNA_MEM_T_DMA;
2481 mem_info->len = PAGE_SIZE * hpage_count;
2482 mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2484 res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2485 mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2486 mem_info->mem_type = BNA_MEM_T_DMA;
2487 mem_info->len = BFI_IBIDX_SIZE;
2488 mem_info->num = q_cfg->num_paths;
2490 res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2491 mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2492 mem_info->mem_type = BNA_MEM_T_KVA;
2493 mem_info->len = BFI_ENET_RSS_RIT_MAX;
2494 mem_info->num = 1;
2496 res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2497 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2498 res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
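/*
 * Typical call sequence (sketch only; allocating the memory and
 * interrupt resources described in res_info between the two steps is
 * the caller's responsibility):
 *
 *	bna_rx_res_req(&rx_cfg, res_info);
 *	... allocate resources per res_info ...
 *	rx = bna_rx_create(bna, bnad, &rx_cfg, &rx_cbfn, res_info, priv);
 *	bna_rx_enable(rx);
 */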
2501 struct bna_rx *
2502 bna_rx_create(struct bna *bna, struct bnad *bnad,
2503 struct bna_rx_config *rx_cfg,
2504 const struct bna_rx_event_cbfn *rx_cbfn,
2505 struct bna_res_info *res_info,
2506 void *priv)
2508 struct bna_rx_mod *rx_mod = &bna->rx_mod;
2509 struct bna_rx *rx;
2510 struct bna_rxp *rxp;
2511 struct bna_rxq *q0;
2512 struct bna_rxq *q1;
2513 struct bna_intr_info *intr_info;
2514 struct bna_mem_descr *hqunmap_mem;
2515 struct bna_mem_descr *dqunmap_mem;
2516 struct bna_mem_descr *ccb_mem;
2517 struct bna_mem_descr *rcb_mem;
2518 struct bna_mem_descr *cqpt_mem;
2519 struct bna_mem_descr *cswqpt_mem;
2520 struct bna_mem_descr *cpage_mem;
2521 struct bna_mem_descr *hqpt_mem;
2522 struct bna_mem_descr *dqpt_mem;
2523 struct bna_mem_descr *hsqpt_mem;
2524 struct bna_mem_descr *dsqpt_mem;
2525 struct bna_mem_descr *hpage_mem;
2526 struct bna_mem_descr *dpage_mem;
2527 u32 dpage_count, hpage_count;
2528 u32 hq_idx, dq_idx, rcb_idx;
2529 u32 cq_depth, i;
2530 u32 page_count;
2532 if (!bna_rx_res_check(rx_mod, rx_cfg))
2533 return NULL;
2535 intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2536 ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2537 rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2538 dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
2539 hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
2540 cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2541 cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2542 cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2543 hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2544 dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2545 hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2546 dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2547 hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2548 dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2550 page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2551 PAGE_SIZE;
2553 dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2554 PAGE_SIZE;
2556 hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2557 PAGE_SIZE;
2559 rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2560 rx->bna = bna;
2561 rx->rx_flags = 0;
2562 INIT_LIST_HEAD(&rx->rxp_q);
2563 rx->stop_cbfn = NULL;
2564 rx->stop_cbarg = NULL;
2565 rx->priv = priv;
2567 rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2568 rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2569 rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2570 rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2571 rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2572 /* Following callbacks are mandatory */
2573 rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2574 rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2576 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2577 switch (rx->type) {
2578 case BNA_RX_T_REGULAR:
2579 if (!(rx->bna->rx_mod.flags &
2580 BNA_RX_MOD_F_ENET_LOOPBACK))
2581 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2582 break;
2583 case BNA_RX_T_LOOPBACK:
2584 if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2585 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2586 break;
2590 rx->num_paths = rx_cfg->num_paths;
2591 for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
2592 i < rx->num_paths; i++) {
2593 rxp = bna_rxp_get(rx_mod);
2594 list_add_tail(&rxp->qe, &rx->rxp_q);
2595 rxp->type = rx_cfg->rxp_type;
2596 rxp->rx = rx;
2597 rxp->cq.rx = rx;
2599 q0 = bna_rxq_get(rx_mod);
2600 if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2601 q1 = NULL;
2602 else
2603 q1 = bna_rxq_get(rx_mod);
2605 if (1 == intr_info->num)
2606 rxp->vector = intr_info->idl[0].vector;
2607 else
2608 rxp->vector = intr_info->idl[i].vector;
2610 /* Setup IB */
2612 rxp->cq.ib.ib_seg_host_addr.lsb =
2613 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2614 rxp->cq.ib.ib_seg_host_addr.msb =
2615 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2616 rxp->cq.ib.ib_seg_host_addr_kva =
2617 res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2618 rxp->cq.ib.intr_type = intr_info->intr_type;
2619 if (intr_info->intr_type == BNA_INTR_T_MSIX)
2620 rxp->cq.ib.intr_vector = rxp->vector;
2621 else
2622 rxp->cq.ib.intr_vector = (1 << rxp->vector);
2623 rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2624 rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2625 rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2627 bna_rxp_add_rxqs(rxp, q0, q1);
2629 /* Setup large Q */
2631 q0->rx = rx;
2632 q0->rxp = rxp;
2634 q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2635 q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
2636 rcb_idx++; dq_idx++;
2637 q0->rcb->q_depth = rx_cfg->q0_depth;
2638 q0->q_depth = rx_cfg->q0_depth;
2639 q0->multi_buffer = rx_cfg->q0_multi_buf;
2640 q0->buffer_size = rx_cfg->q0_buf_size;
2641 q0->num_vecs = rx_cfg->q0_num_vecs;
2642 q0->rcb->rxq = q0;
2643 q0->rcb->bnad = bna->bnad;
2644 q0->rcb->id = 0;
2645 q0->rx_packets = q0->rx_bytes = 0;
2646 q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2648 bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2649 &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2651 if (rx->rcb_setup_cbfn)
2652 rx->rcb_setup_cbfn(bnad, q0->rcb);
2654 /* Setup small Q */
2656 if (q1) {
2657 q1->rx = rx;
2658 q1->rxp = rxp;
2660 q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2661 q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
2662 rcb_idx++; hq_idx++;
2663 q1->rcb->q_depth = rx_cfg->q1_depth;
2664 q1->q_depth = rx_cfg->q1_depth;
2665 q1->multi_buffer = BNA_STATUS_T_DISABLED;
2666 q1->num_vecs = 1;
2667 q1->rcb->rxq = q1;
2668 q1->rcb->bnad = bna->bnad;
2669 q1->rcb->id = 1;
2670 q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2671 rx_cfg->hds_config.forced_offset
2672 : rx_cfg->q1_buf_size;
2673 q1->rx_packets = q1->rx_bytes = 0;
2674 q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2676 bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2677 &hqpt_mem[i], &hsqpt_mem[i],
2678 &hpage_mem[i]);
2680 if (rx->rcb_setup_cbfn)
2681 rx->rcb_setup_cbfn(bnad, q1->rcb);
2684 /* Setup CQ */
2686 rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2687 cq_depth = rx_cfg->q0_depth +
2688 ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2689 0 : rx_cfg->q1_depth);
2690 /* if multi-buffer is enabled sum of q0_depth
2691 * and q1_depth need not be a power of 2
2692 */
2693 BNA_TO_POWER_OF_2_HIGH(cq_depth);
2694 rxp->cq.ccb->q_depth = cq_depth;
2695 rxp->cq.ccb->cq = &rxp->cq;
2696 rxp->cq.ccb->rcb[0] = q0->rcb;
2697 q0->rcb->ccb = rxp->cq.ccb;
2698 if (q1) {
2699 rxp->cq.ccb->rcb[1] = q1->rcb;
2700 q1->rcb->ccb = rxp->cq.ccb;
2702 rxp->cq.ccb->hw_producer_index =
2703 (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2704 rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2705 rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2706 rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2707 rxp->cq.ccb->rx_coalescing_timeo =
2708 rxp->cq.ib.coalescing_timeo;
2709 rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2710 rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2711 rxp->cq.ccb->bnad = bna->bnad;
2712 rxp->cq.ccb->id = i;
2714 bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2715 &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2717 if (rx->ccb_setup_cbfn)
2718 rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2721 rx->hds_cfg = rx_cfg->hds_config;
2723 bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2725 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2727 rx_mod->rid_mask |= (1 << rx->rid);
2729 return rx;
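/*
 * Tear down in reverse order of creation: uninit the RxF, run the
 * optional rcb/ccb destroy callbacks for every path, return RxQs and
 * RxPs to their free lists, unlink the Rx from rx_active_q, clear its
 * rid from rid_mask and finally put the Rx itself back on the free list.
 */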
2732 void
2733 bna_rx_destroy(struct bna_rx *rx)
2735 struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2736 struct bna_rxq *q0 = NULL;
2737 struct bna_rxq *q1 = NULL;
2738 struct bna_rxp *rxp;
2739 struct list_head *qe;
2741 bna_rxf_uninit(&rx->rxf);
2743 while (!list_empty(&rx->rxp_q)) {
2744 bfa_q_deq(&rx->rxp_q, &rxp);
2745 GET_RXQS(rxp, q0, q1);
2746 if (rx->rcb_destroy_cbfn)
2747 rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2748 q0->rcb = NULL;
2749 q0->rxp = NULL;
2750 q0->rx = NULL;
2751 bna_rxq_put(rx_mod, q0);
2753 if (q1) {
2754 if (rx->rcb_destroy_cbfn)
2755 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2756 q1->rcb = NULL;
2757 q1->rxp = NULL;
2758 q1->rx = NULL;
2759 bna_rxq_put(rx_mod, q1);
2761 rxp->rxq.slr.large = NULL;
2762 rxp->rxq.slr.small = NULL;
2764 if (rx->ccb_destroy_cbfn)
2765 rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2766 rxp->cq.ccb = NULL;
2767 rxp->rx = NULL;
2768 bna_rxp_put(rx_mod, rxp);
2771 list_for_each(qe, &rx_mod->rx_active_q) {
2772 if (qe == &rx->qe) {
2773 list_del(&rx->qe);
2774 bfa_q_qe_init(&rx->qe);
2775 break;
2779 rx_mod->rid_mask &= ~(1 << rx->rid);
2781 rx->bna = NULL;
2782 rx->priv = NULL;
2783 bna_rx_put(rx_mod, rx);
2786 void
2787 bna_rx_enable(struct bna_rx *rx)
2789 if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
2790 return;
2792 rx->rx_flags |= BNA_RX_F_ENABLED;
2793 if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2794 bfa_fsm_send_event(rx, RX_E_START);
2797 void
2798 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2799 void (*cbfn)(void *, struct bna_rx *))
2801 if (type == BNA_SOFT_CLEANUP) {
2802 /* h/w should not be accessed. Treat it as if we are already stopped */
2803 (*cbfn)(rx->bna->bnad, rx);
2804 } else {
2805 rx->stop_cbfn = cbfn;
2806 rx->stop_cbarg = rx->bna->bnad;
2808 rx->rx_flags &= ~BNA_RX_F_ENABLED;
2810 bfa_fsm_send_event(rx, RX_E_STOP);
2814 void
2815 bna_rx_cleanup_complete(struct bna_rx *rx)
2817 bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2820 void
2821 bna_rx_vlan_strip_enable(struct bna_rx *rx)
2823 struct bna_rxf *rxf = &rx->rxf;
2825 if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2826 rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2827 rxf->vlan_strip_pending = true;
2828 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2832 void
2833 bna_rx_vlan_strip_disable(struct bna_rx *rx)
2835 struct bna_rxf *rxf = &rx->rxf;
2837 if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2838 rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2839 rxf->vlan_strip_pending = true;
2840 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
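/*
 * Promiscuous and default modes are mutually exclusive system-wide:
 * enabling either fails if the other (or the same mode on a different
 * rid) is already active.  Hardware is reconfigured via RXF_E_CONFIG
 * only when one of the promisc/allmulti toggles actually changed;
 * otherwise the completion callback is invoked directly.
 */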
2844 enum bna_cb_status
2845 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2846 enum bna_rxmode bitmask,
2847 void (*cbfn)(struct bnad *, struct bna_rx *))
2849 struct bna_rxf *rxf = &rx->rxf;
2850 int need_hw_config = 0;
2852 /* Error checks */
2854 if (is_promisc_enable(new_mode, bitmask)) {
2855 /* If promisc mode is already enabled elsewhere in the system */
2856 if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2857 (rx->bna->promisc_rid != rxf->rx->rid))
2858 goto err_return;
2860 /* If default mode is already enabled in the system */
2861 if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2862 goto err_return;
2864 /* Trying to enable promiscuous and default mode together */
2865 if (is_default_enable(new_mode, bitmask))
2866 goto err_return;
2869 if (is_default_enable(new_mode, bitmask)) {
2870 /* If default mode is already enabled elsewhere in the system */
2871 if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2872 (rx->bna->default_mode_rid != rxf->rx->rid)) {
2873 goto err_return;
2876 /* If promiscuous mode is already enabled in the system */
2877 if (rx->bna->promisc_rid != BFI_INVALID_RID)
2878 goto err_return;
2881 /* Process the commands */
2883 if (is_promisc_enable(new_mode, bitmask)) {
2884 if (bna_rxf_promisc_enable(rxf))
2885 need_hw_config = 1;
2886 } else if (is_promisc_disable(new_mode, bitmask)) {
2887 if (bna_rxf_promisc_disable(rxf))
2888 need_hw_config = 1;
2891 if (is_allmulti_enable(new_mode, bitmask)) {
2892 if (bna_rxf_allmulti_enable(rxf))
2893 need_hw_config = 1;
2894 } else if (is_allmulti_disable(new_mode, bitmask)) {
2895 if (bna_rxf_allmulti_disable(rxf))
2896 need_hw_config = 1;
2899 /* Trigger h/w if needed */
2901 if (need_hw_config) {
2902 rxf->cam_fltr_cbfn = cbfn;
2903 rxf->cam_fltr_cbarg = rx->bna->bnad;
2904 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2905 } else if (cbfn)
2906 (*cbfn)(rx->bna->bnad, rx);
2908 return BNA_CB_SUCCESS;
2910 err_return:
2911 return BNA_CB_FAIL;
2914 void
2915 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2917 struct bna_rxf *rxf = &rx->rxf;
2919 if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2920 rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2921 rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2922 bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2926 void
2927 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2929 struct bna_rxp *rxp;
2930 struct list_head *qe;
2932 list_for_each(qe, &rx->rxp_q) {
2933 rxp = (struct bna_rxp *)qe;
2934 rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2935 bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2939 void
2940 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2942 int i, j;
2944 for (i = 0; i < BNA_LOAD_T_MAX; i++)
2945 for (j = 0; j < BNA_BIAS_T_MAX; j++)
2946 bna->rx_mod.dim_vector[i][j] = vector[i][j];
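/*
 * Dynamic interrupt moderation: classify the packet count seen since the
 * last update into one of the BNA_LOAD_T_* bins, pick a bias from the
 * small-vs-large packet ratio (bias 0 only when small packets outnumber
 * large ones more than 2:1), and program the coalescing timeout looked
 * up in rx_mod.dim_vector[load][bias] into the path's IB.
 *
 * Example: ~45K packets in the sampling interval falls in
 * BNA_LOAD_T_HIGH_1; if large packets dominate, bias is 1, so
 * dim_vector[BNA_LOAD_T_HIGH_1][1] is used.
 */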
2949 void
2950 bna_rx_dim_update(struct bna_ccb *ccb)
2952 struct bna *bna = ccb->cq->rx->bna;
2953 u32 load, bias;
2954 u32 pkt_rt, small_rt, large_rt;
2955 u8 coalescing_timeo;
2957 if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2958 (ccb->pkt_rate.large_pkt_cnt == 0))
2959 return;
2961 /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2963 small_rt = ccb->pkt_rate.small_pkt_cnt;
2964 large_rt = ccb->pkt_rate.large_pkt_cnt;
2966 pkt_rt = small_rt + large_rt;
2968 if (pkt_rt < BNA_PKT_RATE_10K)
2969 load = BNA_LOAD_T_LOW_4;
2970 else if (pkt_rt < BNA_PKT_RATE_20K)
2971 load = BNA_LOAD_T_LOW_3;
2972 else if (pkt_rt < BNA_PKT_RATE_30K)
2973 load = BNA_LOAD_T_LOW_2;
2974 else if (pkt_rt < BNA_PKT_RATE_40K)
2975 load = BNA_LOAD_T_LOW_1;
2976 else if (pkt_rt < BNA_PKT_RATE_50K)
2977 load = BNA_LOAD_T_HIGH_1;
2978 else if (pkt_rt < BNA_PKT_RATE_60K)
2979 load = BNA_LOAD_T_HIGH_2;
2980 else if (pkt_rt < BNA_PKT_RATE_80K)
2981 load = BNA_LOAD_T_HIGH_3;
2982 else
2983 load = BNA_LOAD_T_HIGH_4;
2985 if (small_rt > (large_rt << 1))
2986 bias = 0;
2987 else
2988 bias = 1;
2990 ccb->pkt_rate.small_pkt_cnt = 0;
2991 ccb->pkt_rate.large_pkt_cnt = 0;
2993 coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2994 ccb->rx_coalescing_timeo = coalescing_timeo;
2996 /* Program the new timeout into the IB */
2997 bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
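/*
 * Default [load][bias] coalescing-timeout table, presumably handed to
 * bna_rx_dim_reconfig() by the bnad layer; values are in the same units
 * as the per-IB coalescing_timeo programmed by bna_rx_dim_update().
 */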
3000 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
3001 {12, 12},
3002 {6, 10},
3003 {5, 10},
3004 {4, 8},
3005 {3, 6},
3006 {3, 6},
3007 {2, 4},
3008 {1, 2},
3011 /* TX */
3013 #define call_tx_stop_cbfn(tx) \
3014 do { \
3015 if ((tx)->stop_cbfn) { \
3016 void (*cbfn)(void *, struct bna_tx *); \
3017 void *cbarg; \
3018 cbfn = (tx)->stop_cbfn; \
3019 cbarg = (tx)->stop_cbarg; \
3020 (tx)->stop_cbfn = NULL; \
3021 (tx)->stop_cbarg = NULL; \
3022 cbfn(cbarg, (tx)); \
3024 } while (0)
3026 #define call_tx_prio_change_cbfn(tx) \
3027 do { \
3028 if ((tx)->prio_change_cbfn) { \
3029 void (*cbfn)(struct bnad *, struct bna_tx *); \
3030 cbfn = (tx)->prio_change_cbfn; \
3031 (tx)->prio_change_cbfn = NULL; \
3032 cbfn((tx)->bna->bnad, (tx)); \
3034 } while (0)
3036 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
3037 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
3038 static void bna_tx_enet_stop(struct bna_tx *tx);
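/*
 * Tx lifecycle: stopped -> start_wait -> started -> stop_wait ->
 * cleanup_wait -> stopped on the normal path.  Priority/bandwidth
 * updates detour through prio_stop_wait/prio_cleanup_wait and restart
 * the Tx; TX_E_FAIL lands in the failed state, from which a new
 * TX_E_START goes through quiesce_wait once cleanup completes.
 */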
3040 enum bna_tx_event {
3041 TX_E_START = 1,
3042 TX_E_STOP = 2,
3043 TX_E_FAIL = 3,
3044 TX_E_STARTED = 4,
3045 TX_E_STOPPED = 5,
3046 TX_E_PRIO_CHANGE = 6,
3047 TX_E_CLEANUP_DONE = 7,
3048 TX_E_BW_UPDATE = 8,
3051 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
3052 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
3053 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
3054 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
3055 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
3056 enum bna_tx_event);
3057 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
3058 enum bna_tx_event);
3059 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
3060 enum bna_tx_event);
3061 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
3062 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
3063 enum bna_tx_event);
3065 static void
3066 bna_tx_sm_stopped_entry(struct bna_tx *tx)
3068 call_tx_stop_cbfn(tx);
3071 static void
3072 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
3074 switch (event) {
3075 case TX_E_START:
3076 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3077 break;
3079 case TX_E_STOP:
3080 call_tx_stop_cbfn(tx);
3081 break;
3083 case TX_E_FAIL:
3084 /* No-op */
3085 break;
3087 case TX_E_PRIO_CHANGE:
3088 call_tx_prio_change_cbfn(tx);
3089 break;
3091 case TX_E_BW_UPDATE:
3092 /* No-op */
3093 break;
3095 default:
3096 bfa_sm_fault(event);
3100 static void
3101 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
3103 bna_bfi_tx_enet_start(tx);
3106 static void
3107 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
3109 switch (event) {
3110 case TX_E_STOP:
3111 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3112 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3113 break;
3115 case TX_E_FAIL:
3116 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED);
3117 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3118 break;
3120 case TX_E_STARTED:
3121 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) {
3122 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED |
3123 BNA_TX_F_BW_UPDATED);
3124 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3125 } else
3126 bfa_fsm_set_state(tx, bna_tx_sm_started);
3127 break;
3129 case TX_E_PRIO_CHANGE:
3130 tx->flags |= BNA_TX_F_PRIO_CHANGED;
3131 break;
3133 case TX_E_BW_UPDATE:
3134 tx->flags |= BNA_TX_F_BW_UPDATED;
3135 break;
3137 default:
3138 bfa_sm_fault(event);
3142 static void
3143 bna_tx_sm_started_entry(struct bna_tx *tx)
3145 struct bna_txq *txq;
3146 struct list_head *qe;
3147 int is_regular = (tx->type == BNA_TX_T_REGULAR);
3149 list_for_each(qe, &tx->txq_q) {
3150 txq = (struct bna_txq *)qe;
3151 txq->tcb->priority = txq->priority;
3152 /* Start IB */
3153 bna_ib_start(tx->bna, &txq->ib, is_regular);
3155 tx->tx_resume_cbfn(tx->bna->bnad, tx);
3158 static void
3159 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
3161 switch (event) {
3162 case TX_E_STOP:
3163 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3164 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3165 bna_tx_enet_stop(tx);
3166 break;
3168 case TX_E_FAIL:
3169 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3170 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3171 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3172 break;
3174 case TX_E_PRIO_CHANGE:
3175 case TX_E_BW_UPDATE:
3176 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
3177 break;
3179 default:
3180 bfa_sm_fault(event);
3184 static void
3185 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
3189 static void
3190 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3192 switch (event) {
3193 case TX_E_FAIL:
3194 case TX_E_STOPPED:
3195 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3196 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3197 break;
3199 case TX_E_STARTED:
3200 /*
3201 * We are here due to start_wait -> stop_wait transition on
3202 * TX_E_STOP event
3203 */
3204 bna_tx_enet_stop(tx);
3205 break;
3207 case TX_E_PRIO_CHANGE:
3208 case TX_E_BW_UPDATE:
3209 /* No-op */
3210 break;
3212 default:
3213 bfa_sm_fault(event);
3217 static void
3218 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
3222 static void
3223 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3225 switch (event) {
3226 case TX_E_FAIL:
3227 case TX_E_PRIO_CHANGE:
3228 case TX_E_BW_UPDATE:
3229 /* No-op */
3230 break;
3232 case TX_E_CLEANUP_DONE:
3233 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3234 break;
3236 default:
3237 bfa_sm_fault(event);
3241 static void
3242 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
3244 tx->tx_stall_cbfn(tx->bna->bnad, tx);
3245 bna_tx_enet_stop(tx);
3248 static void
3249 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
3251 switch (event) {
3252 case TX_E_STOP:
3253 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
3254 break;
3256 case TX_E_FAIL:
3257 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3258 call_tx_prio_change_cbfn(tx);
3259 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3260 break;
3262 case TX_E_STOPPED:
3263 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
3264 break;
3266 case TX_E_PRIO_CHANGE:
3267 case TX_E_BW_UPDATE:
3268 /* No-op */
3269 break;
3271 default:
3272 bfa_sm_fault(event);
3276 static void
3277 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
3279 call_tx_prio_change_cbfn(tx);
3280 tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
3283 static void
3284 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3286 switch (event) {
3287 case TX_E_STOP:
3288 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3289 break;
3291 case TX_E_FAIL:
3292 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3293 break;
3295 case TX_E_PRIO_CHANGE:
3296 case TX_E_BW_UPDATE:
3297 /* No-op */
3298 break;
3300 case TX_E_CLEANUP_DONE:
3301 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3302 break;
3304 default:
3305 bfa_sm_fault(event);
3309 static void
3310 bna_tx_sm_failed_entry(struct bna_tx *tx)
3314 static void
3315 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3317 switch (event) {
3318 case TX_E_START:
3319 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3320 break;
3322 case TX_E_STOP:
3323 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3324 break;
3326 case TX_E_FAIL:
3327 /* No-op */
3328 break;
3330 case TX_E_CLEANUP_DONE:
3331 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3332 break;
3334 default:
3335 bfa_sm_fault(event);
3339 static void
3340 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3344 static void
3345 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3347 switch (event) {
3348 case TX_E_STOP:
3349 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3350 break;
3352 case TX_E_FAIL:
3353 bfa_fsm_set_state(tx, bna_tx_sm_failed);
3354 break;
3356 case TX_E_CLEANUP_DONE:
3357 bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3358 break;
3360 case TX_E_BW_UPDATE:
3361 /* No-op */
3362 break;
3364 default:
3365 bfa_sm_fault(event);
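/*
 * Build a BFI_ENET_H2I_TX_CFG_SET_REQ describing every TxQ (queue page
 * table, priority, IB index address and MSI-X vector) plus the shared
 * IB and VLAN settings, and post it on the message queue to firmware.
 */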
3369 static void
3370 bna_bfi_tx_enet_start(struct bna_tx *tx)
3372 struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3373 struct bna_txq *txq = NULL;
3374 struct list_head *qe;
3375 int i;
3377 bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3378 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3379 cfg_req->mh.num_entries = htons(
3380 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3382 cfg_req->num_queues = tx->num_txq;
3383 for (i = 0, qe = bfa_q_first(&tx->txq_q);
3384 i < tx->num_txq;
3385 i++, qe = bfa_q_next(qe)) {
3386 txq = (struct bna_txq *)qe;
3388 bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3389 cfg_req->q_cfg[i].q.priority = txq->priority;
3391 cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3392 txq->ib.ib_seg_host_addr.lsb;
3393 cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3394 txq->ib.ib_seg_host_addr.msb;
3395 cfg_req->q_cfg[i].ib.intr.msix_index =
3396 htons((u16)txq->ib.intr_vector);
3399 cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3400 cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3401 cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3402 cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3403 cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3404 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3405 cfg_req->ib_cfg.coalescing_timeout =
3406 htonl((u32)txq->ib.coalescing_timeo);
3407 cfg_req->ib_cfg.inter_pkt_timeout =
3408 htonl((u32)txq->ib.interpkt_timeo);
3409 cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3411 cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3412 cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3413 cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_DISABLED;
3414 cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3416 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3417 sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3418 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3421 static void
3422 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3424 struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3426 bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3427 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3428 req->mh.num_entries = htons(
3429 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3430 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3431 &req->mh);
3432 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3435 static void
3436 bna_tx_enet_stop(struct bna_tx *tx)
3438 struct bna_txq *txq;
3439 struct list_head *qe;
3441 /* Stop IB */
3442 list_for_each(qe, &tx->txq_q) {
3443 txq = (struct bna_txq *)qe;
3444 bna_ib_stop(tx->bna, &txq->ib);
3447 bna_bfi_tx_enet_stop(tx);
3450 static void
3451 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3452 struct bna_mem_descr *qpt_mem,
3453 struct bna_mem_descr *swqpt_mem,
3454 struct bna_mem_descr *page_mem)
3456 u8 *kva;
3457 u64 dma;
3458 struct bna_dma_addr bna_dma;
3459 int i;
3461 txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3462 txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3463 txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3464 txq->qpt.page_count = page_count;
3465 txq->qpt.page_size = page_size;
3467 txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3468 txq->tcb->sw_q = page_mem->kva;
3470 kva = page_mem->kva;
3471 BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3473 for (i = 0; i < page_count; i++) {
3474 txq->tcb->sw_qpt[i] = kva;
3475 kva += PAGE_SIZE;
3477 BNA_SET_DMA_ADDR(dma, &bna_dma);
3478 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3479 bna_dma.lsb;
3480 ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3481 bna_dma.msb;
3482 dma += PAGE_SIZE;
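/*
 * Regular Tx objects are taken from the head of tx_free_q and loopback
 * ones from the tail, keeping the two types at opposite ends of the
 * rid-ordered free list.
 */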
3486 static struct bna_tx *
3487 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3489 struct list_head *qe = NULL;
3490 struct bna_tx *tx = NULL;
3492 if (list_empty(&tx_mod->tx_free_q))
3493 return NULL;
3494 if (type == BNA_TX_T_REGULAR) {
3495 bfa_q_deq(&tx_mod->tx_free_q, &qe);
3496 } else {
3497 bfa_q_deq_tail(&tx_mod->tx_free_q, &qe);
3499 tx = (struct bna_tx *)qe;
3500 bfa_q_qe_init(&tx->qe);
3501 tx->type = type;
3503 return tx;
3506 static void
3507 bna_tx_free(struct bna_tx *tx)
3509 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3510 struct bna_txq *txq;
3511 struct list_head *prev_qe;
3512 struct list_head *qe;
3514 while (!list_empty(&tx->txq_q)) {
3515 bfa_q_deq(&tx->txq_q, &txq);
3516 bfa_q_qe_init(&txq->qe);
3517 txq->tcb = NULL;
3518 txq->tx = NULL;
3519 list_add_tail(&txq->qe, &tx_mod->txq_free_q);
3522 list_for_each(qe, &tx_mod->tx_active_q) {
3523 if (qe == &tx->qe) {
3524 list_del(&tx->qe);
3525 bfa_q_qe_init(&tx->qe);
3526 break;
3530 tx->bna = NULL;
3531 tx->priv = NULL;
3533 prev_qe = NULL;
3534 list_for_each(qe, &tx_mod->tx_free_q) {
3535 if (((struct bna_tx *)qe)->rid < tx->rid)
3536 prev_qe = qe;
3537 else {
3538 break;
3542 if (prev_qe == NULL) {
3543 /* This is the first entry */
3544 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe);
3545 } else if (bfa_q_next(prev_qe) == &tx_mod->tx_free_q) {
3546 /* This is the last entry */
3547 list_add_tail(&tx->qe, &tx_mod->tx_free_q);
3548 } else {
3549 /* Somewhere in the middle */
3550 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe);
3551 bfa_q_prev(&tx->qe) = prev_qe;
3552 bfa_q_next(prev_qe) = &tx->qe;
3553 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe;
3557 static void
3558 bna_tx_start(struct bna_tx *tx)
3560 tx->flags |= BNA_TX_F_ENET_STARTED;
3561 if (tx->flags & BNA_TX_F_ENABLED)
3562 bfa_fsm_send_event(tx, TX_E_START);
3565 static void
3566 bna_tx_stop(struct bna_tx *tx)
3568 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3569 tx->stop_cbarg = &tx->bna->tx_mod;
3571 tx->flags &= ~BNA_TX_F_ENET_STARTED;
3572 bfa_fsm_send_event(tx, TX_E_STOP);
3575 static void
3576 bna_tx_fail(struct bna_tx *tx)
3578 tx->flags &= ~BNA_TX_F_ENET_STARTED;
3579 bfa_fsm_send_event(tx, TX_E_FAIL);
3582 void
3583 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3585 struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3586 struct bna_txq *txq = NULL;
3587 struct list_head *qe;
3588 int i;
3590 bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3591 sizeof(struct bfi_enet_tx_cfg_rsp));
3593 tx->hw_id = cfg_rsp->hw_id;
3595 for (i = 0, qe = bfa_q_first(&tx->txq_q);
3596 i < tx->num_txq; i++, qe = bfa_q_next(qe)) {
3597 txq = (struct bna_txq *)qe;
3599 /* Setup doorbells */
3600 txq->tcb->i_dbell->doorbell_addr =
3601 tx->bna->pcidev.pci_bar_kva
3602 + ntohl(cfg_rsp->q_handles[i].i_dbell);
3603 txq->tcb->q_dbell =
3604 tx->bna->pcidev.pci_bar_kva
3605 + ntohl(cfg_rsp->q_handles[i].q_dbell);
3606 txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3608 /* Initialize producer/consumer indexes */
3609 (*txq->tcb->hw_consumer_index) = 0;
3610 txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3613 bfa_fsm_send_event(tx, TX_E_STARTED);
3616 void
3617 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3619 bfa_fsm_send_event(tx, TX_E_STOPPED);
3622 void
3623 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3625 struct bna_tx *tx;
3626 struct list_head *qe;
3628 list_for_each(qe, &tx_mod->tx_active_q) {
3629 tx = (struct bna_tx *)qe;
3630 bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
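/*
 * Size the per-TxQ resources: the queue itself needs
 * ALIGN(txq_depth * BFI_TXQ_WI_SIZE, PAGE_SIZE) bytes, i.e. page_count
 * pages, plus matching QPT/SWQPT entries, one IB index segment and one
 * MSI-X vector per TxQ.
 */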
3634 void
3635 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3637 u32 q_size;
3638 u32 page_count;
3639 struct bna_mem_info *mem_info;
3641 res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3642 mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3643 mem_info->mem_type = BNA_MEM_T_KVA;
3644 mem_info->len = sizeof(struct bna_tcb);
3645 mem_info->num = num_txq;
3647 q_size = txq_depth * BFI_TXQ_WI_SIZE;
3648 q_size = ALIGN(q_size, PAGE_SIZE);
3649 page_count = q_size >> PAGE_SHIFT;
3651 res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3652 mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3653 mem_info->mem_type = BNA_MEM_T_DMA;
3654 mem_info->len = page_count * sizeof(struct bna_dma_addr);
3655 mem_info->num = num_txq;
3657 res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3658 mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3659 mem_info->mem_type = BNA_MEM_T_KVA;
3660 mem_info->len = page_count * sizeof(void *);
3661 mem_info->num = num_txq;
3663 res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3664 mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3665 mem_info->mem_type = BNA_MEM_T_DMA;
3666 mem_info->len = PAGE_SIZE * page_count;
3667 mem_info->num = num_txq;
3669 res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3670 mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3671 mem_info->mem_type = BNA_MEM_T_DMA;
3672 mem_info->len = BFI_IBIDX_SIZE;
3673 mem_info->num = num_txq;
3675 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3676 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3677 BNA_INTR_T_MSIX;
3678 res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
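/*
 * Typical call sequence (sketch only; allocating the resources described
 * in res_info between the two steps is the caller's job, mirroring the
 * Rx path above):
 *
 *	bna_tx_res_req(num_txq, txq_depth, res_info);
 *	... allocate resources per res_info ...
 *	tx = bna_tx_create(bna, bnad, &tx_cfg, &tx_cbfn, res_info, priv);
 *	bna_tx_enable(tx);
 */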
3681 struct bna_tx *
3682 bna_tx_create(struct bna *bna, struct bnad *bnad,
3683 struct bna_tx_config *tx_cfg,
3684 const struct bna_tx_event_cbfn *tx_cbfn,
3685 struct bna_res_info *res_info, void *priv)
3687 struct bna_intr_info *intr_info;
3688 struct bna_tx_mod *tx_mod = &bna->tx_mod;
3689 struct bna_tx *tx;
3690 struct bna_txq *txq;
3691 struct list_head *qe;
3692 int page_count;
3693 int i;
3695 intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3696 page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3697 PAGE_SIZE;
3699 /*
3700 * Get resources
3701 */
3703 if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3704 return NULL;
3706 /* Tx */
3708 tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3709 if (!tx)
3710 return NULL;
3711 tx->bna = bna;
3712 tx->priv = priv;
3714 /* TxQs */
3716 INIT_LIST_HEAD(&tx->txq_q);
3717 for (i = 0; i < tx_cfg->num_txq; i++) {
3718 if (list_empty(&tx_mod->txq_free_q))
3719 goto err_return;
3721 bfa_q_deq(&tx_mod->txq_free_q, &txq);
3722 bfa_q_qe_init(&txq->qe);
3723 list_add_tail(&txq->qe, &tx->txq_q);
3724 txq->tx = tx;
3727 /*
3728 * Initialize
3729 */
3731 /* Tx */
3733 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3734 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3735 /* Following callbacks are mandatory */
3736 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3737 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3738 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3740 list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3742 tx->num_txq = tx_cfg->num_txq;
3744 tx->flags = 0;
3745 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3746 switch (tx->type) {
3747 case BNA_TX_T_REGULAR:
3748 if (!(tx->bna->tx_mod.flags &
3749 BNA_TX_MOD_F_ENET_LOOPBACK))
3750 tx->flags |= BNA_TX_F_ENET_STARTED;
3751 break;
3752 case BNA_TX_T_LOOPBACK:
3753 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3754 tx->flags |= BNA_TX_F_ENET_STARTED;
3755 break;
3759 /* TxQ */
3761 i = 0;
3762 list_for_each(qe, &tx->txq_q) {
3763 txq = (struct bna_txq *)qe;
3764 txq->tcb = (struct bna_tcb *)
3765 res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3766 txq->tx_packets = 0;
3767 txq->tx_bytes = 0;
3769 /* IB */
3770 txq->ib.ib_seg_host_addr.lsb =
3771 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3772 txq->ib.ib_seg_host_addr.msb =
3773 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3774 txq->ib.ib_seg_host_addr_kva =
3775 res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3776 txq->ib.intr_type = intr_info->intr_type;
3777 txq->ib.intr_vector = (intr_info->num == 1) ?
3778 intr_info->idl[0].vector :
3779 intr_info->idl[i].vector;
3780 if (intr_info->intr_type == BNA_INTR_T_INTX)
3781 txq->ib.intr_vector = (1 << txq->ib.intr_vector);
3782 txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3783 txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3784 txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3786 /* TCB */
3788 txq->tcb->q_depth = tx_cfg->txq_depth;
3789 txq->tcb->unmap_q = (void *)
3790 res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3791 txq->tcb->hw_consumer_index =
3792 (u32 *)txq->ib.ib_seg_host_addr_kva;
3793 txq->tcb->i_dbell = &txq->ib.door_bell;
3794 txq->tcb->intr_type = txq->ib.intr_type;
3795 txq->tcb->intr_vector = txq->ib.intr_vector;
3796 txq->tcb->txq = txq;
3797 txq->tcb->bnad = bnad;
3798 txq->tcb->id = i;
3800 /* QPT, SWQPT, Pages */
3801 bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3802 &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3803 &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3804 &res_info[BNA_TX_RES_MEM_T_PAGE].
3805 res_u.mem_info.mdl[i]);
3807 /* Callback to bnad for setting up TCB */
3808 if (tx->tcb_setup_cbfn)
3809 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3811 if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3812 txq->priority = txq->tcb->id;
3813 else
3814 txq->priority = tx_mod->default_prio;
3816 i++;
3819 tx->txf_vlan_id = 0;
3821 bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3823 tx_mod->rid_mask |= (1 << tx->rid);
3825 return tx;
3827 err_return:
3828 bna_tx_free(tx);
3829 return NULL;
3832 void
3833 bna_tx_destroy(struct bna_tx *tx)
3835 struct bna_txq *txq;
3836 struct list_head *qe;
3838 list_for_each(qe, &tx->txq_q) {
3839 txq = (struct bna_txq *)qe;
3840 if (tx->tcb_destroy_cbfn)
3841 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3844 tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid);
3845 bna_tx_free(tx);
3848 void
3849 bna_tx_enable(struct bna_tx *tx)
3851 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3852 return;
3854 tx->flags |= BNA_TX_F_ENABLED;
3856 if (tx->flags & BNA_TX_F_ENET_STARTED)
3857 bfa_fsm_send_event(tx, TX_E_START);
3860 void
3861 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3862 void (*cbfn)(void *, struct bna_tx *))
3864 if (type == BNA_SOFT_CLEANUP) {
3865 (*cbfn)(tx->bna->bnad, tx);
3866 return;
3869 tx->stop_cbfn = cbfn;
3870 tx->stop_cbarg = tx->bna->bnad;
3872 tx->flags &= ~BNA_TX_F_ENABLED;
3874 bfa_fsm_send_event(tx, TX_E_STOP);
3877 void
3878 bna_tx_cleanup_complete(struct bna_tx *tx)
3880 bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3883 static void
3884 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3886 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3888 bfa_wc_down(&tx_mod->tx_stop_wc);
3891 static void
3892 bna_tx_mod_cb_tx_stopped_all(void *arg)
3894 struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3896 if (tx_mod->stop_cbfn)
3897 tx_mod->stop_cbfn(&tx_mod->bna->enet);
3898 tx_mod->stop_cbfn = NULL;
3901 void
3902 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3903 struct bna_res_info *res_info)
3905 int i;
3907 tx_mod->bna = bna;
3908 tx_mod->flags = 0;
3910 tx_mod->tx = (struct bna_tx *)
3911 res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3912 tx_mod->txq = (struct bna_txq *)
3913 res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3915 INIT_LIST_HEAD(&tx_mod->tx_free_q);
3916 INIT_LIST_HEAD(&tx_mod->tx_active_q);
3918 INIT_LIST_HEAD(&tx_mod->txq_free_q);
3920 for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3921 tx_mod->tx[i].rid = i;
3922 bfa_q_qe_init(&tx_mod->tx[i].qe);
3923 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3924 bfa_q_qe_init(&tx_mod->txq[i].qe);
3925 list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3928 tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3929 tx_mod->default_prio = 0;
3930 tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3931 tx_mod->iscsi_prio = -1;
3934 void
3935 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3937 struct list_head *qe;
3938 int i;
3940 i = 0;
3941 list_for_each(qe, &tx_mod->tx_free_q)
3942 i++;
3944 i = 0;
3945 list_for_each(qe, &tx_mod->txq_free_q)
3946 i++;
3948 tx_mod->bna = NULL;
3951 void
3952 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3954 struct bna_tx *tx;
3955 struct list_head *qe;
3957 tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3958 if (type == BNA_TX_T_LOOPBACK)
3959 tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3961 list_for_each(qe, &tx_mod->tx_active_q) {
3962 tx = (struct bna_tx *)qe;
3963 if (tx->type == type)
3964 bna_tx_start(tx);
3968 void
3969 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3971 struct bna_tx *tx;
3972 struct list_head *qe;
3974 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3975 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3977 tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3979 bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3981 list_for_each(qe, &tx_mod->tx_active_q) {
3982 tx = (struct bna_tx *)qe;
3983 if (tx->type == type) {
3984 bfa_wc_up(&tx_mod->tx_stop_wc);
3985 bna_tx_stop(tx);
3989 bfa_wc_wait(&tx_mod->tx_stop_wc);
3992 void
3993 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3995 struct bna_tx *tx;
3996 struct list_head *qe;
3998 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3999 tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
4001 list_for_each(qe, &tx_mod->tx_active_q) {
4002 tx = (struct bna_tx *)qe;
4003 bna_tx_fail(tx);
4007 void
4008 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
4010 struct bna_txq *txq;
4011 struct list_head *qe;
4013 list_for_each(qe, &tx->txq_q) {
4014 txq = (struct bna_txq *)qe;
4015 bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);