/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __BNA_H__
#define __BNA_H__

#include "bna_types.h"

extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];

/**
 * Macros and constants
 */

#define BNA_IOC_TIMER_FREQ		200

#define BNA_MESSAGE_SIZE		256

/* MBOX API for PORT, TX, RX */
#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg)		\
do {									\
	memcpy(&((_qe)->cmd.msg[0]), (_cmd), (_cmd_len));		\
	(_qe)->cbfn = (_cbfn);						\
	(_qe)->cbarg = (_cbarg);					\
} while (0)
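
/*
 * Illustrative usage (not part of the driver API; "req", "qe" and the
 * callback are placeholders): a control-path module stages a firmware
 * command into its mailbox queue element and hands it to the mailbox
 * module declared further below, e.g.
 *
 *	bna_mbox_qe_fill(qe, &req, sizeof(req), cbfn, cbarg);
 *	bna_mbox_send(bna, qe);
 */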

#define bna_is_small_rxq(rcb) ((rcb)->id == 1)

#define BNA_MAC_IS_EQUAL(_mac1, _mac2)					\
	(!memcmp((_mac1), (_mac2), sizeof(mac_t)))

#define BNA_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)

#define BNA_TO_POWER_OF_2(x)						\
do {									\
	int _shift = 0;							\
	while ((x) && (x) != 1) {					\
		(x) >>= 1;						\
		_shift++;						\
	}								\
	(x) <<= _shift;							\
} while (0)

#define BNA_TO_POWER_OF_2_HIGH(x)					\
do {									\
	int n = 1;							\
	while (n < (x))							\
		n <<= 1;						\
	(x) = n;							\
} while (0)
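
/*
 * Illustrative sketch (not part of the driver API): the queue index macros
 * below mask with (depth - 1), so queue depths must be powers of two; a
 * requested depth is typically rounded up before the ring is sized.
 */
static inline u32
bna_example_ring_depth(u32 requested)
{
	u32 depth = requested;

	if (!BNA_POWER_OF_2(depth))
		BNA_TO_POWER_OF_2_HIGH(depth);	/* round up to a power of 2 */
	return depth;
}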

/*
 * input : _addr-> os dma addr in host endian format,
 * output : _bna_dma_addr-> pointer to hw dma addr
 */
#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr)				\
do {									\
	u64 tmp_addr = cpu_to_be64((u64)(_addr));			\
	(_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \
	(_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \
} while (0)

/*
 * input : _bna_dma_addr-> pointer to hw dma addr
 * output : _addr-> os dma addr in host endian format
 */
#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr)				\
do {									\
	(_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32)		\
		| ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff));		\
} while (0)
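
/*
 * Illustrative sketch (not part of the driver API): a host dma_addr_t
 * (e.g. from dma_map_single()) is split into the big-endian msb/lsb pair
 * the hardware descriptors expect; BNA_GET_DMA_ADDR reverses it.
 */
static inline void
bna_example_dma_addr_pack(dma_addr_t host_addr, struct bna_dma_addr *hw_addr)
{
	BNA_SET_DMA_ADDR(host_addr, hw_addr);
}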

#define containing_rec(addr, type, field)				\
	((type *)((unsigned char *)(addr) -				\
	 (unsigned char *)(&((type *)0)->field)))
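
/*
 * containing_rec() mirrors the kernel's container_of(): given a pointer to
 * an embedded member, it recovers the enclosing structure.  Illustrative
 * usage (the structure and field chosen here are only an example):
 *
 *	struct bna_mac *mac = containing_rec(qe, struct bna_mac, qe);
 */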

#define BNA_TXQ_WI_NEEDED(_vectors)	(((_vectors) + 3) >> 2)

/* TxQ element is 64 bytes */
#define BNA_TXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 6)
#define BNA_TXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 6)

#define BNA_TXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{									\
	unsigned int page_index;	/* index within a page */	\
	void *page_addr;						\
	page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index);	\
	page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
	(_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}
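
/*
 * Illustrative usage (not part of the driver API): the send path resolves
 * a wrapped producer index through the software queue page table and
 * learns how many contiguous entries remain on that page, e.g.
 *
 *	BNA_TXQ_QPGE_PTR_GET(prod, tcb->sw_qpt, txqent, wi_range);
 */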

/* RxQ element is 8 bytes */
#define BNA_RXQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 3)
#define BNA_RXQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 3)

#define BNA_RXQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range) \
{									\
	unsigned int page_index;	/* index within a page */	\
	void *page_addr;						\
	page_index = (_qe_idx) & (BNA_RXQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_RXQ_PAGE_INDEX_MAX - page_index);	\
	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
				BNA_RXQ_PAGE_INDEX_MAX_SHIFT)];		\
	(_qe_ptr) = &((struct bna_rxq_entry *)(page_addr))[page_index]; \
}

/* CQ element is 16 bytes */
#define BNA_CQ_PAGE_INDEX_MAX		(PAGE_SIZE >> 4)
#define BNA_CQ_PAGE_INDEX_MAX_SHIFT	(PAGE_SHIFT - 4)

#define BNA_CQ_QPGE_PTR_GET(_qe_idx, _qpt_ptr, _qe_ptr, _qe_ptr_range)	\
{									\
	unsigned int page_index;	/* index within a page */	\
	void *page_addr;						\
	page_index = (_qe_idx) & (BNA_CQ_PAGE_INDEX_MAX - 1);		\
	(_qe_ptr_range) = (BNA_CQ_PAGE_INDEX_MAX - page_index);	\
	page_addr = (_qpt_ptr)[((_qe_idx) >>				\
				BNA_CQ_PAGE_INDEX_MAX_SHIFT)];		\
	(_qe_ptr) = &((struct bna_cq_entry *)(page_addr))[page_index];	\
}

#define BNA_QE_INDX_2_PTR(_cast, _qe_idx, _q_base)			\
	(&((_cast *)(_q_base))[(_qe_idx)])

#define BNA_QE_INDX_RANGE(_qe_idx, _q_depth) ((_q_depth) - (_qe_idx))

#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth)			\
	((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))

#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)		\
	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))

#define BNA_QE_FREE_CNT(_q_ptr, _q_depth)				\
	(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) &	\
	 ((_q_depth) - 1))

#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth)				\
	((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) &	\
	 ((_q_depth) - 1))

#define BNA_Q_GET_CI(_q_ptr)		((_q_ptr)->q.consumer_index)

#define BNA_Q_GET_PI(_q_ptr)		((_q_ptr)->q.producer_index)

#define BNA_Q_PI_ADD(_q_ptr, _num)					\
	(_q_ptr)->q.producer_index =					\
		(((_q_ptr)->q.producer_index + (_num)) &		\
		((_q_ptr)->q.q_depth - 1))

#define BNA_Q_CI_ADD(_q_ptr, _num)					\
	(_q_ptr)->q.consumer_index =					\
		(((_q_ptr)->q.consumer_index + (_num))			\
		& ((_q_ptr)->q.q_depth - 1))

#define BNA_Q_FREE_COUNT(_q_ptr)					\
	(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))

#define BNA_Q_IN_USE_COUNT(_q_ptr)					\
	(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))

/* These macros build the data portion of the TxQ/RxQ doorbell */
#define BNA_DOORBELL_Q_PRD_IDX(_pi)	(0x80000000 | (_pi))
#define BNA_DOORBELL_Q_STOP		(0x40000000)

/* These macros build the data portion of the IB doorbell */
#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events)			\
	(0x80000000 | ((_timeout) << 16) | (_events))
#define BNA_DOORBELL_IB_INT_DISABLE	(0x40000000)

/* Set the coalescing timer for the given ib */
#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer)		\
	((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0));

/* Acks 'events' # of events for a given ib */
#define bna_ib_ack(_i_dbell, _events)					\
	(writel(((_i_dbell)->doorbell_ack | (_events)),			\
		(_i_dbell)->doorbell_addr));
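
/*
 * Illustrative sketch (not part of the driver API): a poll routine
 * acknowledges the events it just processed, which re-arms the IB with the
 * coalescing timer programmed via bna_ib_coalescing_timer_set().
 */
static inline void
bna_example_ib_rearm(struct bna_ib_dbell *i_dbell, u32 processed)
{
	bna_ib_ack(i_dbell, processed);
}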

#define bna_txq_prod_indx_doorbell(_tcb)				\
	(writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index),	\
		(_tcb)->q_dbell));

#define bna_rxq_prod_indx_doorbell(_rcb)				\
	(writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index),	\
		(_rcb)->q_dbell));
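
/*
 * Illustrative sketch (not part of the driver API): after writing "sent"
 * work items, the send path advances the TxQ producer index (q_depth is a
 * power of two) and rings the doorbell so the hardware starts fetching.
 */
static inline void
bna_example_txq_post(struct bna_tcb *tcb, u32 sent)
{
	BNA_QE_INDX_ADD(tcb->producer_index, sent, tcb->q_depth);
	bna_txq_prod_indx_doorbell(tcb);
}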

#define BNA_LARGE_PKT_SIZE		1000

#define BNA_UPDATE_PKT_CNT(_pkt, _len)					\
do {									\
	if ((_len) > BNA_LARGE_PKT_SIZE) {				\
		(_pkt)->large_pkt_cnt++;				\
	} else {							\
		(_pkt)->small_pkt_cnt++;				\
	}								\
} while (0)
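
/*
 * Illustrative usage: called once per received frame so the dynamic
 * interrupt-moderation code can classify the traffic mix, e.g.
 *
 *	BNA_UPDATE_PKT_CNT(pkt_rt, frame_len);
 */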

#define call_rxf_stop_cbfn(rxf, status)					\
	if ((rxf)->stop_cbfn) {						\
		(*(rxf)->stop_cbfn)((rxf)->stop_cbarg, (status));	\
		(rxf)->stop_cbfn = NULL;				\
		(rxf)->stop_cbarg = NULL;				\
	}

#define call_rxf_start_cbfn(rxf, status)				\
	if ((rxf)->start_cbfn) {					\
		(*(rxf)->start_cbfn)((rxf)->start_cbarg, (status));	\
		(rxf)->start_cbfn = NULL;				\
		(rxf)->start_cbarg = NULL;				\
	}

#define call_rxf_cam_fltr_cbfn(rxf, status)				\
	if ((rxf)->cam_fltr_cbfn) {					\
		(*(rxf)->cam_fltr_cbfn)((rxf)->cam_fltr_cbarg, rxf->rx,\
					(status));			\
		(rxf)->cam_fltr_cbfn = NULL;				\
		(rxf)->cam_fltr_cbarg = NULL;				\
	}

#define call_rxf_pause_cbfn(rxf, status)				\
	if ((rxf)->oper_state_cbfn) {					\
		(*(rxf)->oper_state_cbfn)((rxf)->oper_state_cbarg, rxf->rx,\
					(status));			\
		(rxf)->rxf_flags &= ~BNA_RXF_FL_OPERSTATE_CHANGED;	\
		(rxf)->oper_state_cbfn = NULL;				\
		(rxf)->oper_state_cbarg = NULL;				\
	}

#define call_rxf_resume_cbfn(rxf, status) call_rxf_pause_cbfn(rxf, status)

#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx))

#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx))

#define xxx_enable(mode, bitmask, xxx)					\
do {									\
	bitmask |= xxx;							\
	mode |= xxx;							\
} while (0)

#define xxx_disable(mode, bitmask, xxx)					\
do {									\
	bitmask |= xxx;							\
	mode &= ~xxx;							\
} while (0)

#define xxx_inactive(mode, bitmask, xxx)				\
do {									\
	bitmask &= ~xxx;						\
	mode &= ~xxx;							\
} while (0)

#define is_promisc_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_promisc_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC)

#define promisc_inactive(mode, bitmask)					\
	xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC)

#define is_default_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_default_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT)

#define default_inactive(mode, bitmask)					\
	xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT)

#define is_allmulti_enable(mode, bitmask)				\
	is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define is_allmulti_disable(mode, bitmask)				\
	is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_enable(mode, bitmask)					\
	xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_disable(mode, bitmask)					\
	xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI)

#define allmulti_inactive(mode, bitmask)				\
	xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI)
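
/*
 * Illustrative sketch (not part of the driver API): the helpers above keep
 * a requested-mode word and a "modes being changed" bitmask in sync; e.g.
 * when promiscuous mode is turned on, both words get BNA_RXMODE_PROMISC set.
 */
static inline void
bna_example_request_promisc(enum bna_rxmode *mode, enum bna_rxmode *bitmask)
{
	promisc_enable(*mode, *bitmask);
}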

#define GET_RXQS(rxp, q0, q1)	do {					\
	switch ((rxp)->type) {						\
	case BNA_RXP_SINGLE:						\
		(q0) = rxp->rxq.single.only;				\
		(q1) = NULL;						\
		break;							\
	case BNA_RXP_SLR:						\
		(q0) = rxp->rxq.slr.large;				\
		(q1) = rxp->rxq.slr.small;				\
		break;							\
	case BNA_RXP_HDS:						\
		(q0) = rxp->rxq.hds.data;				\
		(q1) = rxp->rxq.hds.hdr;				\
		break;							\
	default:							\
		break;							\
	}								\
} while (0)
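
/*
 * Illustrative sketch (not part of the driver API): callers use GET_RXQS
 * so they do not have to care about the rx-path layout; q1 comes back NULL
 * for single-queue paths and holds the small/header queue for SLR/HDS paths.
 */
static inline void
bna_example_rxp_queues(struct bna_rxp *rxp,
		       struct bna_rxq **q_large, struct bna_rxq **q_small)
{
	struct bna_rxq *q0 = NULL;
	struct bna_rxq *q1 = NULL;

	GET_RXQS(rxp, q0, q1);
	*q_large = q0;
	*q_small = q1;
}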

/**
 * Function prototypes
 */

void bna_res_req(struct bna_res_info *res_info);
void bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev,
		struct bna_res_info *res_info);
void bna_uninit(struct bna *bna);
void bna_stats_get(struct bna *bna);
void bna_get_perm_mac(struct bna *bna, u8 *mac);
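
/*
 * Typical bring-up order in the bnad driver (illustrative, based only on
 * the declarations above): the BNAD first asks how much resource the BNA
 * needs, allocates it, and then initializes the BNA with the filled-in
 * resource table:
 *
 *	bna_res_req(res_info);
 *	... allocate everything described in res_info ...
 *	bna_init(bna, bnad, pcidev_info, res_info);
 */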

int bna_rit_mod_can_satisfy(struct bna_rit_mod *rit_mod, int seg_size);

struct bna_mac *bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod);
void bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod,
			  struct bna_mac *mac);
struct bna_mac *bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod);
void bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod,
			  struct bna_mac *mac);
struct bna_rit_segment *
bna_rit_mod_seg_get(struct bna_rit_mod *rit_mod, int seg_size);
void bna_rit_mod_seg_put(struct bna_rit_mod *rit_mod,
			 struct bna_rit_segment *seg);

void bna_device_enable(struct bna_device *device);
void bna_device_disable(struct bna_device *device,
			enum bna_cleanup_type type);

/* APIs for PORT, TX, RX */
void bna_mbox_handler(struct bna *bna, u32 intr_status);
void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);

int bna_port_mtu_get(struct bna_port *port);
void bna_llport_rx_started(struct bna_llport *llport);
void bna_llport_rx_stopped(struct bna_llport *llport);

void bna_port_enable(struct bna_port *port);
void bna_port_disable(struct bna_port *port, enum bna_cleanup_type type,
		      void (*cbfn)(void *, enum bna_cb_status));
void bna_port_pause_config(struct bna_port *port,
			   struct bna_pause_config *pause_config,
			   void (*cbfn)(struct bnad *, enum bna_cb_status));
void bna_port_mtu_set(struct bna_port *port, int mtu,
		      void (*cbfn)(struct bnad *, enum bna_cb_status));
void bna_port_mac_get(struct bna_port *port, mac_t *mac);

/* Callbacks for TX, RX */
void bna_port_cb_tx_stopped(struct bna_port *port,
			    enum bna_cb_status status);
void bna_port_cb_rx_stopped(struct bna_port *port,
			    enum bna_cb_status status);

void bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_ib_mod_uninit(struct bna_ib_mod *ib_mod);

void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod);
int bna_tx_state_get(struct bna_tx *tx);

void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type);
void bna_tx_mod_fail(struct bna_tx_mod *tx_mod);
void bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio);
void bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link);

void bna_tx_res_req(int num_txq, int txq_depth,
		    struct bna_res_info *res_info);
struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad,
			     struct bna_tx_config *tx_cfg,
			     struct bna_tx_event_cbfn *tx_cbfn,
			     struct bna_res_info *res_info, void *priv);
void bna_tx_destroy(struct bna_tx *tx);
void bna_tx_enable(struct bna_tx *tx);
void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
		    void (*cbfn)(void *, struct bna_tx *,
				 enum bna_cb_status));
void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo);

void rxf_cb_cam_fltr_mbox_cmd(void *arg, int status);
void rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
		      const struct bna_mac *mac_addr);
void __rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status);
void bna_rxf_adv_init(struct bna_rxf *rxf,
		      struct bna_rx *rx,
		      struct bna_rx_config *q_config);
int rxf_process_packet_filter_ucast(struct bna_rxf *rxf);
int rxf_process_packet_filter_promisc(struct bna_rxf *rxf);
int rxf_process_packet_filter_default(struct bna_rxf *rxf);
int rxf_process_packet_filter_allmulti(struct bna_rxf *rxf);
int rxf_clear_packet_filter_ucast(struct bna_rxf *rxf);
int rxf_clear_packet_filter_promisc(struct bna_rxf *rxf);
int rxf_clear_packet_filter_default(struct bna_rxf *rxf);
int rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf);
void rxf_reset_packet_filter_ucast(struct bna_rxf *rxf);
void rxf_reset_packet_filter_promisc(struct bna_rxf *rxf);
void rxf_reset_packet_filter_default(struct bna_rxf *rxf);
void rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf);

void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
		     struct bna_res_info *res_info);
void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod);
int bna_rx_state_get(struct bna_rx *rx);
int bna_rxf_state_get(struct bna_rxf *rxf);

void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type);
void bna_rx_mod_fail(struct bna_rx_mod *rx_mod);

void bna_rx_res_req(struct bna_rx_config *rx_config,
		    struct bna_res_info *res_info);
struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad,
			     struct bna_rx_config *rx_cfg,
			     struct bna_rx_event_cbfn *rx_cbfn,
			     struct bna_res_info *res_info, void *priv);
void bna_rx_destroy(struct bna_rx *rx);
void bna_rx_enable(struct bna_rx *rx);
void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
		    void (*cbfn)(void *, struct bna_rx *,
				 enum bna_cb_status));
void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo);
void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]);
void bna_rx_dim_update(struct bna_ccb *ccb);

enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status));
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *mcmac,
		 void (*cbfn)(struct bnad *, struct bna_rx *,
			      enum bna_cb_status));
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mcmac,
		     void (*cbfn)(struct bnad *, struct bna_rx *,
				  enum bna_cb_status));
enum bna_cb_status
bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode,
		enum bna_rxmode bitmask,
		void (*cbfn)(struct bnad *, struct bna_rx *,
			     enum bna_cb_status));
void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id);
void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id);
void bna_rx_vlanfilter_enable(struct bna_rx *rx);
void bna_rx_hds_enable(struct bna_rx *rx, struct bna_rxf_hds *hds_config,
		       void (*cbfn)(struct bnad *, struct bna_rx *,
				    enum bna_cb_status));
void bna_rx_hds_disable(struct bna_rx *rx,
			void (*cbfn)(struct bnad *, struct bna_rx *,
				     enum bna_cb_status));

/* Callbacks for BNA */
void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
		       struct bna_stats *stats);

/* Callbacks for DEVICE */
void bnad_cb_device_enabled(struct bnad *bnad, enum bna_cb_status status);
void bnad_cb_device_disabled(struct bnad *bnad, enum bna_cb_status status);
void bnad_cb_device_enable_mbox_intr(struct bnad *bnad);
void bnad_cb_device_disable_mbox_intr(struct bnad *bnad);

/* Callbacks for port */
void bnad_cb_port_link_status(struct bnad *bnad,
			      enum bna_link_status status);

#endif	/* __BNA_H__ */