/* bnx2x_sp.c: Broadcom Everest network driver.
 *
 * Copyright 2011 Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a
 * license other than the GPL, without Broadcom's express prior written
 * consent.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Vladislav Zolotarov
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32c.h>
#include "bnx2x.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"

#define BNX2X_MAX_EMUL_MULTI		16
/**** Exe Queue interfaces ****/

/**
 * bnx2x_exe_queue_init - init the Exe Queue object
 *
 * @o:		pointer to the object
 * @exe_len:	length
 * @owner:	pointer to the owner
 * @validate:	validate function pointer
 * @optimize:	optimize function pointer
 * @exec:	execute function pointer
 * @get:	get function pointer
 */
static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
					struct bnx2x_exe_queue_obj *o,
					int exe_len,
					union bnx2x_qable_obj *owner,
					exe_q_validate validate,
					exe_q_optimize optimize,
					exe_q_execute exec,
					exe_q_get get)
{
	memset(o, 0, sizeof(*o));

	INIT_LIST_HEAD(&o->exe_queue);
	INIT_LIST_HEAD(&o->pending_comp);

	spin_lock_init(&o->lock);

	o->exe_chunk_len = exe_len;
	o->owner         = owner;

	/* Owner specific callbacks */
	o->validate      = validate;
	o->optimize      = optimize;
	o->execute       = exec;
	o->get           = get;

	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
			 "length of %d\n", exe_len);
}
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
					     struct bnx2x_exeq_elem *elem)
{
	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
	kfree(elem);
}

static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;
	int cnt = 0;

	spin_lock_bh(&o->lock);

	list_for_each_entry(elem, &o->exe_queue, link)
		cnt++;

	spin_unlock_bh(&o->lock);

	return cnt;
}
/**
 * bnx2x_exe_queue_add - add a new element to the execution queue
 *
 * @bp:		driver handle
 * @o:		queue
 * @cmd:	new command to add
 * @restore:	true - do not optimize the command
 *
 * If the element is optimized or is illegal, frees it.
 */
static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
				      struct bnx2x_exe_queue_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      bool restore)
{
	int rc;

	spin_lock_bh(&o->lock);

	if (!restore) {
		/* Try to cancel this element queue */
		rc = o->optimize(bp, o->owner, elem);
		if (rc)
			goto free_and_exit;

		/* Check if this request is ok */
		rc = o->validate(bp, o->owner, elem);
		if (rc) {
			BNX2X_ERR("Preamble failed: %d\n", rc);
			goto free_and_exit;
		}
	}

	/* If so, add it to the execution queue */
	list_add_tail(&elem->link, &o->exe_queue);

	spin_unlock_bh(&o->lock);

	return 0;

free_and_exit:
	bnx2x_exe_queue_free_elem(bp, elem);

	spin_unlock_bh(&o->lock);

	return rc;
}
static inline void __bnx2x_exe_queue_reset_pending(
	struct bnx2x *bp,
	struct bnx2x_exe_queue_obj *o)
{
	struct bnx2x_exeq_elem *elem;

	while (!list_empty(&o->pending_comp)) {
		elem = list_first_entry(&o->pending_comp,
					struct bnx2x_exeq_elem, link);

		list_del(&elem->link);
		bnx2x_exe_queue_free_elem(bp, elem);
	}
}

static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
						 struct bnx2x_exe_queue_obj *o)
{
	spin_lock_bh(&o->lock);

	__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
}
/**
 * bnx2x_exe_queue_step - execute one execution chunk atomically
 *
 * @bp:			driver handle
 * @o:			queue
 * @ramrod_flags:	flags
 *
 * (Atomicity is ensured using the exe_queue->lock).
 */
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
				       struct bnx2x_exe_queue_obj *o,
				       unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem, spacer;
	int cur_len = 0, rc;

	memset(&spacer, 0, sizeof(spacer));

	spin_lock_bh(&o->lock);

	/*
	 * Next step should not be performed until the current is finished,
	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
	 * properly clear object internals without sending any command to the
	 * FW which also implies there won't be any completion to clear the
	 * 'pending' list.
	 */
	if (!list_empty(&o->pending_comp)) {
		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
					 "resetting pending_comp\n");
			__bnx2x_exe_queue_reset_pending(bp, o);
		} else {
			spin_unlock_bh(&o->lock);
			return 1;
		}
	}

	/*
	 * Run through the pending commands list and create a next
	 * execution chunk.
	 */
	while (!list_empty(&o->exe_queue)) {
		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
					link);
		WARN_ON(!elem->cmd_len);

		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
			cur_len += elem->cmd_len;
			/*
			 * Prevent from both lists being empty when moving an
			 * element. This will allow the call of
			 * bnx2x_exe_queue_empty() without locking.
			 */
			list_add_tail(&spacer.link, &o->pending_comp);
			mb();
			list_del(&elem->link);
			list_add_tail(&elem->link, &o->pending_comp);
			list_del(&spacer.link);
		} else
			break;
	}

	/* Sanity check */
	if (!cur_len) {
		spin_unlock_bh(&o->lock);
		return 0;
	}

	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
	if (rc < 0)
		/*
		 * In case of an error return the commands back to the queue
		 * and reset the pending_comp.
		 */
		list_splice_init(&o->pending_comp, &o->exe_queue);
	else if (!rc)
		/*
		 * If zero is returned, means there are no outstanding pending
		 * completions and we may dismiss the pending list.
		 */
		__bnx2x_exe_queue_reset_pending(bp, o);

	spin_unlock_bh(&o->lock);
	return rc;
}
static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
{
	bool empty = list_empty(&o->exe_queue);

	/* Don't reorder!!! */
	mb();

	return empty && list_empty(&o->pending_comp);
}

static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
	struct bnx2x *bp)
{
	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
}
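
/*
 * Usage sketch for the Exe Queue helpers above (illustrative only, not
 * driver code; 'obj' and 'flags' are placeholders and the cmd_data setup
 * is elided):
 *
 *	struct bnx2x_exeq_elem *elem = bnx2x_exe_queue_alloc_elem(bp);
 *
 *	if (elem) {
 *		elem->cmd_len = 1;
 *		// ... fill elem->cmd_data for the owner's callbacks ...
 *		if (!bnx2x_exe_queue_add(bp, &obj->exe_queue, elem, false))
 *			bnx2x_exe_queue_step(bp, &obj->exe_queue, &flags);
 *	}
 *
 * Note that bnx2x_exe_queue_add() runs the owner's optimize/validate
 * callbacks and frees the element itself on rejection, so the caller must
 * not free it again on a non-zero return.
 */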
/************************ raw_obj functions ***********************************/
static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
{
	return !!test_bit(o->state, o->pstate);
}

static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->state, o->pstate);
	smp_mb__after_clear_bit();
}
/**
 * bnx2x_state_wait - wait until the given bit(state) is cleared
 *
 * @bp:		device handle
 * @state:	state which is to be cleared
 * @state_p:	state buffer
 *
 */
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
				   unsigned long *pstate)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	if (CHIP_REV_IS_EMUL(bp))
		cnt *= 20;

	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);

	might_sleep();
	while (cnt--) {
		if (!test_bit(state, pstate)) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		usleep_range(1000, 1000);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout waiting for state %d\n", state);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
{
	return bnx2x_state_wait(bp, raw->state, raw->pstate);
}
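
/*
 * Pending-bit protocol implemented by the helpers above (illustrative
 * sketch; 'o' stands for any object embedding a bnx2x_raw_obj):
 *
 *	o->raw.set_pending(&o->raw);	// command posted to the FW
 *	// ... the completion handler calls o->raw.clear_pending() ...
 *	rc = o->raw.wait_comp(bp, &o->raw);	// poll until the bit clears
 *
 * bnx2x_state_wait() gives up after roughly 5 seconds (5000 x 1ms) and
 * returns -EBUSY on timeout.
 */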
/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
/* credit handling callbacks */
static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get_entry(mp, offset);
}

static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	WARN_ON(!mp);

	return mp->get(mp, 1);
}

static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get_entry(vp, offset);
}

static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	WARN_ON(!vp);

	return vp->get(vp, 1);
}

static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->get(mp, 1))
		return false;

	if (!vp->get(vp, 1)) {
		mp->put(mp, 1);
		return false;
	}

	return true;
}

static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put_entry(mp, offset);
}

static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;

	return mp->put(mp, 1);
}

static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put_entry(vp, offset);
}

static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	return vp->put(vp, 1);
}

static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;

	if (!mp->put(mp, 1))
		return false;

	if (!vp->put(vp, 1)) {
		mp->get(mp, 1);
		return false;
	}

	return true;
}
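
/*
 * The VLAN-MAC pair callbacks above keep both pools consistent: one pair
 * costs one MAC credit plus one VLAN credit, and a half-taken credit is
 * rolled back if the second pool is exhausted. Sketch of the resulting
 * contract (illustrative):
 *
 *	if (o->get_credit(o)) {		// took 1 MAC + 1 VLAN credit
 *		...
 *		o->put_credit(o);	// returns exactly 1 MAC + 1 VLAN
 *	}
 */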
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
			       union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	if (!is_valid_ether_addr(data->mac.mac))
		return -EINVAL;

	/* Check if a requested MAC already exists */
	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
				union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return -EEXIST;

	return 0;
}

static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
				    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return -EEXIST;

	return 0;
}
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
			    union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if (data->vlan.vlan == pos->u.vlan.vlan)
			return pos;

	return NULL;
}

static struct bnx2x_vlan_mac_registry_elem *
	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
				 union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	list_for_each_entry(pos, &o->head, link)
		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
			     ETH_ALEN)))
			return pos;

	return NULL;
}
/* check_move() callback */
static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
			     struct bnx2x_vlan_mac_obj *dst_o,
			     union bnx2x_classification_ramrod_data *data)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	int rc;

	/* Check if we can delete the requested configuration from the first
	 * object.
	 */
	pos = src_o->check_del(src_o, data);

	/* check if configuration can be added */
	rc = dst_o->check_add(dst_o, data);

	/* If this classification can not be added (is already set)
	 * or can't be deleted - return an error.
	 */
	if (rc || !pos)
		return false;

	return true;
}

static bool bnx2x_check_move_always_err(
	struct bnx2x_vlan_mac_obj *src_o,
	struct bnx2x_vlan_mac_obj *dst_o,
	union bnx2x_classification_ramrod_data *data)
{
	return false;
}
static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;

	return rx_tx_flag;
}
/* LLH CAM line allocations */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,
	LLH_CAM_ETH_LINE,
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
};

static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
					bool add, unsigned char *dev_addr,
					int index)
{
	u32 wb_data[2];
	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
			 NIG_REG_LLH0_FUNC_MEM;

	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
			 (add ? "ADD" : "DELETE"), index);

	if (add) {
		/* LLH_FUNC_MEM is a u64 WB register */
		reg_offset += 8*index;

		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) |  dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) |  dev_addr[1]);

		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
	}

	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
		    NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
}
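
/*
 * Worked example of the wb_data packing above: for dev_addr[] =
 * {0x00, 0x10, 0x18, 0xab, 0xcd, 0xef} (MAC 00:10:18:ab:cd:ef)
 *
 *	wb_data[0] = (0x18 << 24) | (0xab << 16) | (0xcd << 8) | 0xef
 *		   = 0x18abcdef;			// four low MAC bytes
 *	wb_data[1] = (0x00 << 8) | 0x10 = 0x0010;	// two high MAC bytes
 *
 * i.e. the 64-bit wide-bus register receives the MAC in network byte
 * order, split across two 32-bit words.
 */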
/**
 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue for which we want to configure this rule
 * @add:	if true the command is an ADD command, DEL otherwise
 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
 * @hdr:	pointer to a header to setup
 *
 */
static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
	struct eth_classify_cmd_header *hdr)
{
	struct bnx2x_raw_obj *raw = &o->raw;

	hdr->client_id = raw->cl_id;
	hdr->func_id = raw->func_id;

	/* Rx or/and Tx (internal switching) configuration ? */
	hdr->cmd_general_data |=
		bnx2x_vlan_mac_get_rx_tx_flag(o);

	if (add)
		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;

	hdr->cmd_general_data |=
		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
}
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
 *
 * @cid:	connection id
 * @type:	BNX2X_FILTER_XXX_PENDING
 * @hdr:	pointer to header to setup
 * @rule_cnt:
 *
 * currently we always configure one rule and echo field to contain a CID and an
 * opcode type.
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
				struct eth_classify_header *hdr, int rule_cnt)
{
	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
	hdr->rule_cnt = (u8)rule_cnt;
}
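
/*
 * Worked example of the echo encoding above, assuming BNX2X_SWCID_SHIFT
 * is 17 as defined in bnx2x.h (the value is quoted here only for
 * illustration): with cid = 0x12 and a type value of 1,
 * echo = (0x12 & BNX2X_SWCID_MASK) | (1 << 17) = 0x20012. The completion
 * handler can then recover both the SW CID and the pending-bit type from
 * this single word.
 */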
/* hw_config() callbacks */
static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
				 struct bnx2x_vlan_mac_obj *o,
				 struct bnx2x_exeq_elem *elem, int rule_idx,
				 int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;

	/*
	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
	 * relevant. In addition, current implementation is tuned for a
	 * single ETH MAC.
	 *
	 * When multiple unicast ETH MACs PF configuration in switch
	 * independent mode is required (NetQ, multiple netdev MACs,
	 * etc.), consider better utilisation of 8 per function MAC
	 * entries in the LLH register. There is also
	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
	 * total number of CAM entries to 16.
	 *
	 * Currently we won't configure NIG for MACs other than a primary ETH
	 * MAC and iSCSI L2 MAC.
	 *
	 * If this MAC is moving from one Queue to another, no need to change
	 * NIG configuration.
	 */
	if (cmd != BNX2X_VLAN_MAC_MOVE) {
		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac,
					     LLH_CAM_ISCSI_ETH_LINE);
		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
	}

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Setup a command header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
				      &rule_entry->mac.header);

	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
			 add ? "add" : "delete", mac, raw->cl_id);

	/* Set a MAC itself */
	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
			      &rule_entry->mac.mac_mid,
			      &rule_entry->mac.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_MAC,
					      &rule_entry->mac.header);

		/* Set a MAC itself */
		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
				      &rule_entry->mac.mac_mid,
				      &rule_entry->mac.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
/**
 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
 *
 * @bp:		device handle
 * @o:		queue
 * @type:
 * @cam_offset:	offset in cam memory
 * @hdr:	pointer to a header to setup
 *
 * E1/E1H
 */
static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
	struct mac_configuration_hdr *hdr)
{
	struct bnx2x_raw_obj *r = &o->raw;

	hdr->length = 1;
	hdr->offset = (u8)cam_offset;
	hdr->client_id = 0xff;
	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
}

static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
{
	struct bnx2x_raw_obj *r = &o->raw;
	u32 cl_bit_vec = (1 << r->cl_id);

	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
	cfg_entry->pf_id = r->func_id;
	cfg_entry->vlan_id = cpu_to_le16(vlan_id);

	if (add) {
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
		SET_FLAG(cfg_entry->flags,
			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);

		/* Set a MAC in a ramrod data */
		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
				      &cfg_entry->middle_mac_addr,
				      &cfg_entry->lsb_mac_addr, mac);
	} else
		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);
}

static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
{
	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
	struct bnx2x_raw_obj *raw = &o->raw;

	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
					 &config->hdr);
	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
					 cfg_entry);

	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
			 add ? "setting" : "clearing",
			 mac, raw->cl_id, cam_offset);
}
/**
 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset: cam_offset
 */
static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
				     ETH_VLAN_FILTER_ANY_VLAN, config);
}
static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  struct bnx2x_exeq_elem *elem, int rule_idx,
				  int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
				      &rule_entry->vlan.header);

	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
			 vlan);

	/* Set a VLAN itself */
	rule_entry->vlan.vlan = cpu_to_le16(vlan);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_VLAN,
					      &rule_entry->vlan.header);

		/* Set a VLAN itself */
		rule_entry->vlan.vlan = cpu_to_le16(vlan);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
				      struct bnx2x_vlan_mac_obj *o,
				      struct bnx2x_exeq_elem *elem,
				      int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct eth_classify_rules_ramrod_data *data =
		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
	int rule_cnt = rule_idx + 1;
	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
	int cmd = elem->cmd_data.vlan_mac.cmd;
	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;

	/* Reset the ramrod data buffer for the first rule */
	if (rule_idx == 0)
		memset(data, 0, sizeof(*data));

	/* Set a rule header */
	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
				      &rule_entry->pair.header);

	/* Set VLAN and MAC themselves */
	rule_entry->pair.vlan = cpu_to_le16(vlan);
	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
			      &rule_entry->pair.mac_mid,
			      &rule_entry->pair.mac_lsb, mac);

	/* MOVE: Add a rule that will add this MAC to the target Queue */
	if (cmd == BNX2X_VLAN_MAC_MOVE) {
		rule_entry++;
		rule_cnt++;

		/* Setup ramrod data */
		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
					elem->cmd_data.vlan_mac.target_obj,
					      true, CLASSIFY_RULE_OPCODE_PAIR,
					      &rule_entry->pair.header);

		/* Set a VLAN itself */
		rule_entry->pair.vlan = cpu_to_le16(vlan);
		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
				      &rule_entry->pair.mac_mid,
				      &rule_entry->pair.mac_lsb, mac);
	}

	/* Set the ramrod data header */
	/* TODO: take this to the higher level in order to prevent multiple
		 writing */
	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
					rule_cnt);
}
/**
 * bnx2x_set_one_vlan_mac_e1h -
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @elem:	bnx2x_exeq_elem
 * @rule_idx:	rule_idx
 * @cam_offset:	cam_offset
 */
static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
				       struct bnx2x_vlan_mac_obj *o,
				       struct bnx2x_exeq_elem *elem,
				       int rule_idx, int cam_offset)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)(raw->rdata);
	/*
	 * 57710 and 57711 do not support MOVE command,
	 * so it's either ADD or DEL
	 */
	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
		true : false;

	/* Reset the ramrod data buffer */
	memset(config, 0, sizeof(*config));

	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
				     cam_offset, add,
				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
				     ETH_VLAN_FILTER_CLASSIFY, config);
}
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)

/**
 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *
 * @bp:		device handle
 * @p:		command parameters
 * @ppos:	pointer to the cookie
 *
 * reconfigure next MAC/VLAN/VLAN-MAC element from the
 * previously configured elements list.
 *
 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * into account
 *
 * pointer to the cookie - that should be given back in the next call to make
 * function handle the next element. If *ppos is set to NULL it will restart the
 * iterator. If returned *ppos == NULL this means that the last element has been
 * handled.
 *
 */
static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
			   struct bnx2x_vlan_mac_ramrod_params *p,
			   struct bnx2x_vlan_mac_registry_elem **ppos)
{
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;

	/* If list is empty - there is nothing to do here */
	if (list_empty(&o->head)) {
		*ppos = NULL;
		return 0;
	}

	/* make a step... */
	if (*ppos == NULL)
		*ppos = list_first_entry(&o->head,
					 struct bnx2x_vlan_mac_registry_elem,
					 link);
	else
		*ppos = list_next_entry(*ppos, link);

	pos = *ppos;

	/* If it's the last step - return NULL */
	if (list_is_last(&pos->link, &o->head))
		*ppos = NULL;

	/* Prepare a 'user_req' */
	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));

	/* Set the command */
	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;

	/* Set vlan_mac_flags */
	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;

	/* Set a restore bit */
	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);

	return bnx2x_config_vlan_mac(bp, p);
}
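
/*
 * Typical loop driving the restore iterator above (illustrative sketch;
 * 'p' must already name the vlan_mac_obj and carry RAMROD_COMP_WAIT if
 * the caller wants synchronous behaviour):
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			break;
 *	} while (pos);		// NULL cookie: last element was handled
 */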
/*
 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
 * pointer to an element with a specific criteria and NULL if such an element
 * hasn't been found.
 */
static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}

static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
	struct bnx2x_exe_queue_obj *o,
	struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem *pos;
	struct bnx2x_vlan_mac_ramrod_data *data =
		&elem->cmd_data.vlan_mac.u.vlan_mac;

	/* Check pending for execution commands */
	list_for_each_entry(pos, &o->exe_queue, link)
		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
			    sizeof(*data)) &&
		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
			return pos;

	return NULL;
}
/**
 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
 *
 * @bp:		device handle
 * @qo:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 *
 * Checks that the requested configuration can be added. If yes and if
 * requested, consume CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 *
 */
static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	int rc;

	/* Check the registry */
	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
	if (rc) {
		DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
				 "current registry state\n");
		return rc;
	}

	/*
	 * Check if there is a pending ADD command for this
	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
	 */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
		return -EEXIST;
	}

	/*
	 * TODO: Check the pending MOVE from other objects where this
	 * object is a destination object.
	 */

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    o->get_credit(o)))
		return -EINVAL;

	return 0;
}
/**
 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
 *
 * @bp:		device handle
 * @qo:		quable object to check
 * @elem:	element that needs to be deleted
 *
 * Checks that the requested configuration can be deleted. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
					      union bnx2x_qable_obj *qo,
					      struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_registry_elem *pos;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem query_elem;

	/* If this classification can not be deleted (doesn't exist)
	 * - return a BNX2X_EXIST.
	 */
	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
	if (!pos) {
		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
				 "current registry state\n");
		return -EEXIST;
	}

	/*
	 * Check if there are pending DEL or MOVE commands for this
	 * MAC/VLAN/VLAN-MAC. Return an error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check for MOVE commands */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
	if (exeq->get(exeq, &query_elem)) {
		BNX2X_ERR("There is a pending MOVE command already\n");
		return -EINVAL;
	}

	/* Check for DEL commands */
	if (exeq->get(exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
		return -EEXIST;
	}

	/* Return the credit to the credit pool if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    o->put_credit(o))) {
		BNX2X_ERR("Failed to return a credit\n");
		return -EINVAL;
	}

	return 0;
}
/**
 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
 *
 * @bp:		device handle
 * @qo:		quable object to check (source)
 * @elem:	element that needs to be moved
 *
 * Checks that the requested configuration can be moved. If yes and if
 * requested, returns a CAM credit.
 *
 * The 'validate' is run after the 'optimize'.
 */
static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
					       union bnx2x_qable_obj *qo,
					       struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
	struct bnx2x_exeq_elem query_elem;
	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;

	/*
	 * Check if we can perform this operation based on the current registry
	 * state.
	 */
	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
				 "current registry state\n");
		return -EINVAL;
	}

	/*
	 * Check if there is an already pending DEL or MOVE command for the
	 * source object or ADD command for a destination object. Return an
	 * error if so.
	 */
	memcpy(&query_elem, elem, sizeof(query_elem));

	/* Check DEL on source */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
	if (src_exeq->get(src_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending DEL command on the source "
			  "queue already\n");
		return -EINVAL;
	}

	/* Check MOVE on source */
	if (src_exeq->get(src_exeq, elem)) {
		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
		return -EEXIST;
	}

	/* Check ADD on destination */
	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
	if (dest_exeq->get(dest_exeq, &query_elem)) {
		BNX2X_ERR("There is a pending ADD command on the "
			  "destination queue already\n");
		return -EINVAL;
	}

	/* Consume the credit if not requested not to */
	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    dest_o->get_credit(dest_o)))
		return -EINVAL;

	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
	    src_o->put_credit(src_o))) {
		/* return the credit taken from dest... */
		dest_o->put_credit(dest_o);
		return -EINVAL;
	}

	return 0;
}
static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
	case BNX2X_VLAN_MAC_DEL:
		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
	case BNX2X_VLAN_MAC_MOVE:
		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
	default:
		return -EINVAL;
	}
}
/**
 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 *
 */
static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
			       struct bnx2x_vlan_mac_obj *o)
{
	int cnt = 5000, rc;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_raw_obj *raw = &o->raw;

	while (cnt--) {
		/* Wait for the current command to complete */
		rc = raw->wait_comp(bp, raw);
		if (rc)
			return rc;

		/* Wait until there are no pending commands */
		if (!bnx2x_exe_queue_empty(exeq))
			usleep_range(1000, 1000);
		else
			return 0;
	}

	return -EBUSY;
}
/**
 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
 *
 * @bp:		device handle
 * @o:		bnx2x_vlan_mac_obj
 * @cqe:
 * @cont:	if true schedule next execution chunk
 *
 */
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
				   struct bnx2x_vlan_mac_obj *o,
				   union event_ring_elem *cqe,
				   unsigned long *ramrod_flags)
{
	struct bnx2x_raw_obj *r = &o->raw;
	int rc;

	/* Reset pending list */
	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);

	/* Clear pending */
	r->clear_pending(r);

	/* If ramrod failed this is most likely a SW bug */
	if (cqe->message.error)
		return -EINVAL;

	/* Run the next bulk of pending commands if requested */
	if (test_bit(RAMROD_CONT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/* If there is more work to do return PENDING */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		return 1;

	return 0;
}
/**
 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
 *
 * @bp:		device handle
 * @o:		bnx2x_qable_obj
 * @elem:	bnx2x_exeq_elem
 */
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
				   union bnx2x_qable_obj *qo,
				   struct bnx2x_exeq_elem *elem)
{
	struct bnx2x_exeq_elem query, *pos;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;

	memcpy(&query, elem, sizeof(query));

	switch (elem->cmd_data.vlan_mac.cmd) {
	case BNX2X_VLAN_MAC_ADD:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
		break;
	case BNX2X_VLAN_MAC_DEL:
		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
		break;
	default:
		/* Don't handle anything other than ADD or DEL */
		return 0;
	}

	/* If we found the appropriate element - delete it */
	pos = exeq->get(exeq, &query);
	if (pos) {

		/* Return the credit of the optimized command */
		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
			if ((query.cmd_data.vlan_mac.cmd ==
			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
				BNX2X_ERR("Failed to return the credit for the "
					  "optimized ADD command\n");
				return -EINVAL;
			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
				BNX2X_ERR("Failed to recover the credit from "
					  "the optimized DEL command\n");
				return -EINVAL;
			}
		}

		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
			   "ADD" : "DEL");

		list_del(&pos->link);
		bnx2x_exe_queue_free_elem(bp, pos);
		return 1;
	}

	return 0;
}
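
/*
 * Example of the optimization above: suppose a DEL for MAC A is still
 * queued (not yet executed) when an ADD for MAC A arrives. The query
 * flips ADD into DEL, finds the queued DEL, deletes it from the
 * exe_queue and re-takes its CAM credit; the return value of 1 then
 * makes bnx2x_exe_queue_add() drop the incoming ADD as well. Neither
 * command reaches the FW and the already-configured MAC A simply stays.
 */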
/**
 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
 *
 * @bp:		device handle
 * @o:
 * @elem:
 * @restore:
 * @re:
 *
 * prepare a registry element according to the current command request.
 */
static inline int bnx2x_vlan_mac_get_registry_elem(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_obj *o,
	struct bnx2x_exeq_elem *elem,
	bool restore,
	struct bnx2x_vlan_mac_registry_elem **re)
{
	int cmd = elem->cmd_data.vlan_mac.cmd;
	struct bnx2x_vlan_mac_registry_elem *reg_elem;

	/* Allocate a new registry element if needed. */
	if (!restore &&
	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
		if (!reg_elem)
			return -ENOMEM;

		/* Get a new CAM offset */
		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
			/*
			 * This shall never happen, because we have checked the
			 * CAM availability in the 'validate'.
			 */
			WARN_ON(1);
			kfree(reg_elem);
			return -EINVAL;
		}

		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);

		/* Set a VLAN-MAC data */
		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
			  sizeof(reg_elem->u));

		/* Copy the flags (needed for DEL and RESTORE flows) */
		reg_elem->vlan_mac_flags =
			elem->cmd_data.vlan_mac.vlan_mac_flags;
	} else /* DEL, RESTORE */
		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

	*re = reg_elem;
	return 0;
}
/**
 * bnx2x_execute_vlan_mac - execute vlan mac command
 *
 * @bp:			device handle
 * @qo:
 * @exe_chunk:
 * @ramrod_flags:
 *
 * go and send a ramrod!
 */
static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
				  union bnx2x_qable_obj *qo,
				  struct list_head *exe_chunk,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc, idx = 0;
	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
	struct bnx2x_vlan_mac_registry_elem *reg_elem;
	int cmd;

	/*
	 * If DRIVER_ONLY execution is requested, cleanup a registry
	 * and exit. Otherwise send a ramrod to FW.
	 */
	if (!drv_only) {
		WARN_ON(r->check_pending(r));

		/* Set pending */
		r->set_pending(r);

		/* Fill the ramrod data */
		list_for_each_entry(elem, exe_chunk, link) {
			cmd = elem->cmd_data.vlan_mac.cmd;
			/*
			 * We will add to the target object in MOVE command, so
			 * change the object for a CAM search.
			 */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				cam_obj = elem->cmd_data.vlan_mac.target_obj;
			else
				cam_obj = o;

			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
							      elem, restore,
							      &reg_elem);
			if (rc)
				goto error_exit;

			WARN_ON(!reg_elem);

			/* Push a new entry into the registry */
			if (!restore &&
			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
			    (cmd == BNX2X_VLAN_MAC_MOVE)))
				list_add(&reg_elem->link, &cam_obj->head);

			/* Configure a single command in a ramrod data buffer */
			o->set_one_rule(bp, o, elem, idx,
					reg_elem->cam_offset);

			/* MOVE command consumes 2 entries in the ramrod data */
			if (cmd == BNX2X_VLAN_MAC_MOVE)
				idx += 2;
			else
				idx++;
		}

		/*
		 * No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
				   U64_HI(r->rdata_mapping),
				   U64_LO(r->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			goto error_exit;
	}

	/* Now, when we are done with the ramrod - clean up the registry */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;
		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);

			WARN_ON(!reg_elem);

			o->put_cam_offset(o, reg_elem->cam_offset);
			list_del(&reg_elem->link);
			kfree(reg_elem);
		}
	}

	if (!drv_only)
		return 1;
	else
		return 0;

error_exit:
	r->clear_pending(r);

	/* Cleanup a registry in case of a failure */
	list_for_each_entry(elem, exe_chunk, link) {
		cmd = elem->cmd_data.vlan_mac.cmd;

		if (cmd == BNX2X_VLAN_MAC_MOVE)
			cam_obj = elem->cmd_data.vlan_mac.target_obj;
		else
			cam_obj = o;

		/* Delete all newly added above entries */
		if (!restore &&
		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
			reg_elem = o->check_del(cam_obj,
						&elem->cmd_data.vlan_mac.u);
			if (reg_elem) {
				list_del(&reg_elem->link);
				kfree(reg_elem);
			}
		}
	}

	return rc;
}
static inline int bnx2x_vlan_mac_push_new_cmd(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	struct bnx2x_exeq_elem *elem;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);

	/* Allocate the execution queue element */
	elem = bnx2x_exe_queue_alloc_elem(bp);
	if (!elem)
		return -ENOMEM;

	/* Set the command 'length' */
	switch (p->user_req.cmd) {
	case BNX2X_VLAN_MAC_MOVE:
		elem->cmd_len = 2;
		break;
	default:
		elem->cmd_len = 1;
	}

	/* Fill the object specific info */
	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));

	/* Try to add a new command to the pending list */
	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
}
/**
 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
 *
 * @bp:		device handle
 * @p:
 *
 */
int bnx2x_config_vlan_mac(
	struct bnx2x *bp,
	struct bnx2x_vlan_mac_ramrod_params *p)
{
	int rc = 0;
	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
	unsigned long *ramrod_flags = &p->ramrod_flags;
	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
	struct bnx2x_raw_obj *raw = &o->raw;

	/*
	 * Add new elements to the execution list for commands that require it.
	 */
	if (!cont) {
		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
		if (rc)
			return rc;
	}

	/*
	 * If nothing will be executed further in this iteration we want to
	 * return PENDING if there are pending commands
	 */
	if (!bnx2x_exe_queue_empty(&o->exe_queue))
		rc = 1;

	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
				 "clearing a pending bit.\n");
		raw->clear_pending(raw);
	}

	/* Execute commands if required */
	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
		if (rc < 0)
			return rc;
	}

	/*
	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
	 * then the user wants to wait until the last command is done.
	 */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		/*
		 * Wait maximum for the current exe_queue length iterations plus
		 * one (for the current pending command).
		 */
		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;

		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
		       max_iterations--) {

			/* Wait for the current command to complete */
			rc = raw->wait_comp(bp, raw);
			if (rc)
				return rc;

			/* Make a next step */
			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
						  ramrod_flags);
			if (rc < 0)
				return rc;
		}

		return 0;
	}

	return rc;
}
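
/*
 * Minimal caller sketch for bnx2x_config_vlan_mac() (illustrative only;
 * the object pointer below is hypothetical - real callers pass the MAC
 * object they initialized with bnx2x_init_mac_obj()):
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, mac, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *
 *	rc = bnx2x_config_vlan_mac(bp, &p);	// 0, 1 (pending) or -errno
 */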
/**
 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
 *
 * @bp:			device handle
 * @o:
 * @vlan_mac_flags:
 * @ramrod_flags:	execution flags to be used for this deletion
 *
 * if the last operation has completed successfully and there are no
 * more elements left, positive value if the last operation has completed
 * successfully and there are more previously configured elements, negative
 * value if the current operation has failed.
 */
static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
				  struct bnx2x_vlan_mac_obj *o,
				  unsigned long *vlan_mac_flags,
				  unsigned long *ramrod_flags)
{
	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
	int rc = 0;
	struct bnx2x_vlan_mac_ramrod_params p;
	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;

	/* Clear pending commands first */

	spin_lock_bh(&exeq->lock);

	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
		    *vlan_mac_flags)
			list_del(&exeq_pos->link);
	}

	spin_unlock_bh(&exeq->lock);

	/* Prepare a command request */
	memset(&p, 0, sizeof(p));
	p.vlan_mac_obj = o;
	p.ramrod_flags = *ramrod_flags;
	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	/*
	 * Add all but the last VLAN-MAC to the execution queue without actually
	 * executing anything.
	 */
	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
	__clear_bit(RAMROD_CONT, &p.ramrod_flags);

	list_for_each_entry(pos, &o->head, link) {
		if (pos->vlan_mac_flags == *vlan_mac_flags) {
			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
			rc = bnx2x_config_vlan_mac(bp, &p);
			if (rc < 0) {
				BNX2X_ERR("Failed to add a new DEL command\n");
				return rc;
			}
		}
	}

	p.ramrod_flags = *ramrod_flags;
	__set_bit(RAMROD_CONT, &p.ramrod_flags);

	return bnx2x_config_vlan_mac(bp, &p);
}
static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
	unsigned long *pstate, bnx2x_obj_type type)
{
	raw->func_id = func_id;
	raw->cid = cid;
	raw->cl_id = cl_id;
	raw->rdata = rdata;
	raw->rdata_mapping = rdata_mapping;
	raw->state = state;
	raw->pstate = pstate;
	raw->obj_type = type;
	raw->check_pending = bnx2x_raw_check_pending;
	raw->clear_pending = bnx2x_raw_clear_pending;
	raw->set_pending = bnx2x_raw_set_pending;
	raw->wait_comp = bnx2x_raw_wait;
}

static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
	int state, unsigned long *pstate, bnx2x_obj_type type,
	struct bnx2x_credit_pool_obj *macs_pool,
	struct bnx2x_credit_pool_obj *vlans_pool)
{
	INIT_LIST_HEAD(&o->head);

	o->macs_pool = macs_pool;
	o->vlans_pool = vlans_pool;

	o->delete_all = bnx2x_vlan_mac_del_all;
	o->restore = bnx2x_vlan_mac_restore;
	o->complete = bnx2x_complete_vlan_mac;
	o->wait = bnx2x_wait_vlan_mac;

	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
			   state, pstate, type);
}
void bnx2x_init_mac_obj(struct bnx2x *bp,
			struct bnx2x_vlan_mac_obj *mac_obj,
			u8 cl_id, u32 cid, u8 func_id, void *rdata,
			dma_addr_t rdata_mapping, int state,
			unsigned long *pstate, bnx2x_obj_type type,
			struct bnx2x_credit_pool_obj *macs_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;

	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, NULL);

	/* CAM credit pool handling */
	mac_obj->get_credit = bnx2x_get_credit_mac;
	mac_obj->put_credit = bnx2x_put_credit_mac;
	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1x(bp)) {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move_always_err;
		mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	} else {
		mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
		mac_obj->check_del = bnx2x_check_mac_del;
		mac_obj->check_add = bnx2x_check_mac_add;
		mac_obj->check_move = bnx2x_check_move;
		mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_mac);
	}
}
void bnx2x_init_vlan_obj(struct bnx2x *bp,
			 struct bnx2x_vlan_mac_obj *vlan_obj,
			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
			 dma_addr_t rdata_mapping, int state,
			 unsigned long *pstate, bnx2x_obj_type type,
			 struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;

	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type, NULL,
				   vlans_pool);

	vlan_obj->get_credit = bnx2x_get_credit_vlan;
	vlan_obj->put_credit = bnx2x_put_credit_vlan;
	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;

	if (CHIP_IS_E1x(bp)) {
		BNX2X_ERR("Do not support chips other than E2 and newer\n");
		BUG();
	} else {
		vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
		vlan_obj->check_del = bnx2x_check_vlan_del;
		vlan_obj->check_add = bnx2x_check_vlan_add;
		vlan_obj->check_move = bnx2x_check_move;
		vlan_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan);
	}
}
void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
			     dma_addr_t rdata_mapping, int state,
			     unsigned long *pstate, bnx2x_obj_type type,
			     struct bnx2x_credit_pool_obj *macs_pool,
			     struct bnx2x_credit_pool_obj *vlans_pool)
{
	union bnx2x_qable_obj *qable_obj =
		(union bnx2x_qable_obj *)vlan_mac_obj;

	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
				   rdata_mapping, state, pstate, type,
				   macs_pool, vlans_pool);

	/* CAM pool handling */
	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
	/*
	 * CAM offset is relevant for 57710 and 57711 chips only which have a
	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
	 * will be taken from MACs' pool object only.
	 */
	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;

	if (CHIP_IS_E1(bp)) {
		BNX2X_ERR("Do not support chips other than E2\n");
		BUG();
	} else if (CHIP_IS_E1H(bp)) {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move_always_err;
		vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue, 1, qable_obj,
				     bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	} else {
		vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
		vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
		vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
		vlan_mac_obj->check_move = bnx2x_check_move;
		vlan_mac_obj->ramrod_cmd =
			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;

		/* Exe Queue */
		bnx2x_exe_queue_init(bp,
				     &vlan_mac_obj->exe_queue,
				     CLASSIFY_RULES_COUNT,
				     qable_obj, bnx2x_validate_vlan_mac,
				     bnx2x_optimize_vlan_mac,
				     bnx2x_execute_vlan_mac,
				     bnx2x_exeq_get_vlan_mac);
	}
}
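
/*
 * All three constructors above follow the same pattern: select the
 * chip-specific rule writer (set_one_rule), registry checkers and ramrod
 * opcode, then wire an Exe Queue whose chunk length matches what a single
 * ramrod can carry - one command for the E1x SET_MAC ramrod, up to
 * CLASSIFY_RULES_COUNT classification rules on E2 and newer.
 */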
/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
static inline void __storm_memset_mac_filters(struct bnx2x *bp,
			struct tstorm_eth_mac_filter_config *mac_filters,
			u16 pf_id)
{
	size_t size = sizeof(struct tstorm_eth_mac_filter_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);

	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
}

static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
				 struct bnx2x_rx_mode_ramrod_params *p)
{
	/* update the bp MAC filter structure */
	u32 mask = (1 << p->cl_id);

	struct tstorm_eth_mac_filter_config *mac_filters =
		(struct tstorm_eth_mac_filter_config *)p->rdata;

	/* initial setting is drop-all */
	u8 drop_all_ucast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	/* In e1x we only take into account the rx accept flag since tx
	 * switching isn't enabled. */
	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
		/* accept matched ucast */
		drop_all_ucast = 0;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
		/* accept matched mcast */
		drop_all_mcast = 0;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
		/* accept (all) bcast */
		accp_all_bcast = 1;
	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
		/* accept unmatched unicasts */
		unmatched_unicast = 1;

	mac_filters->ucast_drop_all = drop_all_ucast ?
		mac_filters->ucast_drop_all | mask :
		mac_filters->ucast_drop_all & ~mask;

	mac_filters->mcast_drop_all = drop_all_mcast ?
		mac_filters->mcast_drop_all | mask :
		mac_filters->mcast_drop_all & ~mask;

	mac_filters->ucast_accept_all = accp_all_ucast ?
		mac_filters->ucast_accept_all | mask :
		mac_filters->ucast_accept_all & ~mask;

	mac_filters->mcast_accept_all = accp_all_mcast ?
		mac_filters->mcast_accept_all | mask :
		mac_filters->mcast_accept_all & ~mask;

	mac_filters->bcast_accept_all = accp_all_bcast ?
		mac_filters->bcast_accept_all | mask :
		mac_filters->bcast_accept_all & ~mask;

	mac_filters->unmatched_unicast = unmatched_unicast ?
		mac_filters->unmatched_unicast | mask :
		mac_filters->unmatched_unicast & ~mask;

	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
	   mac_filters->ucast_drop_all,
	   mac_filters->mcast_drop_all,
	   mac_filters->ucast_accept_all,
	   mac_filters->mcast_accept_all,
	   mac_filters->bcast_accept_all);

	/* write the MAC filter structure*/
	__storm_memset_mac_filters(bp, mac_filters, p->func_id);

	/* The operation is completed */
	clear_bit(p->state, p->pstate);
	smp_mb__after_clear_bit();

	return 0;
}
/* Setup ramrod data */
static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
				struct eth_classify_header *hdr,
				u8 rule_cnt)
{
	hdr->echo = cid;
	hdr->rule_cnt = rule_cnt;
}
static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
				unsigned long accept_flags,
				struct eth_filter_rules_cmd *cmd,
				bool clear_accept_all)
{
	u16 state;

	/* start with 'drop-all' */
	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;

	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
	}

	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
		state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
	}
	if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
		state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;

	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
		state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
		state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}
	if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
		state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;

	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
	if (clear_accept_all) {
		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
	}

	cmd->state = cpu_to_le16(state);
}
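
/*
 * Worked example for bnx2x_rx_mode_set_cmd_state_e2(): with the
 * BNX2X_ACCEPT_UNICAST and BNX2X_ACCEPT_ALL_MULTICAST bits set in
 * accept_flags, the function starts from UCAST_DROP_ALL | MCAST_DROP_ALL,
 * clears UCAST_DROP_ALL (matched unicasts accepted), then clears
 * MCAST_DROP_ALL and sets MCAST_ACCEPT_ALL. The final state accepts
 * matched unicasts plus all multicasts and drops everything else.
 */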
static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
				struct bnx2x_rx_mode_ramrod_params *p)
{
	struct eth_filter_rules_ramrod_data *data = p->rdata;
	int rc;
	u8 rule_idx = 0;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* Setup ramrod data */

	/* Tx (internal switching) */
	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_TX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
			&(data->rules[rule_idx++]), false);
	}

	/* Rx */
	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
		data->rules[rule_idx].client_id = p->cl_id;
		data->rules[rule_idx].func_id = p->func_id;

		data->rules[rule_idx].cmd_general_data =
			ETH_FILTER_RULES_CMD_RX_CMD;

		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
			&(data->rules[rule_idx++]), false);
	}

	/*
	 * If FCoE Queue configuration has been requested configure the Rx and
	 * internal switching modes for this queue in separate rules.
	 *
	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
	 */
	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
		/* Tx (internal switching) */
		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_TX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
						     &(data->rules[rule_idx++]),
						       true);
		}

		/* Rx */
		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
			data->rules[rule_idx].func_id = p->func_id;

			data->rules[rule_idx].cmd_general_data =
						ETH_FILTER_RULES_CMD_RX_CMD;

			bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
						     &(data->rules[rule_idx++]),
						       true);
		}
	}

	/*
	 * Set the ramrod header (most importantly - number of rules to
	 * configure).
	 */
	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);

	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
			 "tx_accept_flags 0x%lx\n",
	   data->header.rule_cnt, p->rx_accept_flags,
	   p->tx_accept_flags);

	/*
	 * No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
			   U64_HI(p->rdata_mapping),
			   U64_LO(p->rdata_mapping),
			   ETH_CONNECTION_TYPE);
	if (rc)
		return rc;

	/* Ramrod completion is pending */
	return 1;
}
2260 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x
*bp
,
2261 struct bnx2x_rx_mode_ramrod_params
*p
)
2263 return bnx2x_state_wait(bp
, p
->state
, p
->pstate
);
static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
				    struct bnx2x_rx_mode_ramrod_params *p)
{
	/* Do nothing */
	return 0;
}

int bnx2x_config_rx_mode(struct bnx2x *bp,
			 struct bnx2x_rx_mode_ramrod_params *p)
{
	int rc;

	/* Configure the new classification in the chip */
	rc = p->rx_mode_obj->config_rx_mode(bp, p);
	if (rc < 0)
		return rc;

	/* Wait for a ramrod completion if it was requested */
	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
		rc = p->rx_mode_obj->wait_comp(bp, p);
		if (rc)
			return rc;
	}

	return rc;
}
void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
			    struct bnx2x_rx_mode_obj *o)
{
	if (CHIP_IS_E1x(bp)) {
		o->wait_comp      = bnx2x_empty_rx_mode_wait;
		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
	} else {
		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
		o->config_rx_mode = bnx2x_set_rx_mode_e2;
	}
}
/********************* Multicast verbs: SET, CLEAR ****************************/
static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
{
	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
}
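/* Note (informative): the "bin" is simply the top byte (bits 31..24) of the
 * little-endian CRC32c of the 6-byte MAC, i.e. one of 256 possible bins
 * indexing the approximate match registry vector used below. E.g. two MACs
 * whose CRC32c values share the top byte land in the same bin and become
 * indistinguishable to the approximate match filter.
 */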
struct bnx2x_mcast_mac_elem {
	struct list_head link;
	u8 mac[ETH_ALEN];
	u8 pad[2]; /* For a natural alignment of the following buffer */
};

struct bnx2x_pending_mcast_cmd {
	struct list_head link;
	int type; /* BNX2X_MCAST_CMD_X */
	union {
		struct list_head macs_head;
		u32 macs_num; /* Needed for DEL command */
		int next_bin; /* Needed for RESTORE flow with aprox match */
	} data;

	bool done; /* set to true when the command has been handled,
		    * practically used in 57712 handling only, where one pending
		    * command may be handled in a few operations. As long as for
		    * other chips every operation handling is completed in a
		    * single ramrod, there is no need to utilize this field.
		    */
};
static int bnx2x_mcast_wait(struct bnx2x *bp,
			    struct bnx2x_mcast_obj *o)
{
	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
			o->raw.wait_comp(bp, &o->raw))
		return -EBUSY;

	return 0;
}
static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
				   struct bnx2x_mcast_obj *o,
				   struct bnx2x_mcast_ramrod_params *p,
				   int cmd)
{
	int total_sz;
	struct bnx2x_pending_mcast_cmd *new_cmd;
	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
	struct bnx2x_mcast_list_elem *pos;
	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
			     p->mcast_list_len : 0);

	/* If the command is empty ("handle pending commands only"), break */
	if (!p->mcast_list_len)
		return 0;

	total_sz = sizeof(*new_cmd) +
		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);

	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
	if (!new_cmd)
		return -ENOMEM;

	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
			 "macs_list_len=%d\n", cmd, macs_list_len);

	INIT_LIST_HEAD(&new_cmd->data.macs_head);

	new_cmd->type = cmd;
	new_cmd->done = false;

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		cur_mac = (struct bnx2x_mcast_mac_elem *)
			  ((u8 *)new_cmd + sizeof(*new_cmd));

		/* Push the MACs of the current command into the pending
		 * command MACs list: FIFO
		 */
		list_for_each_entry(pos, &p->mcast_list, link) {
			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
			cur_mac++;
		}

		break;

	case BNX2X_MCAST_CMD_DEL:
		new_cmd->data.macs_num = p->mcast_list_len;
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		new_cmd->data.next_bin = 0;
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* Push the new pending command to the tail of the pending list: FIFO */
	list_add_tail(&new_cmd->link, &o->pending_cmds_head);

	o->set_sched(o);

	return 1;
}
/**
 * bnx2x_mcast_get_next_bin - get the next set bin (index)
 *
 * @o:
 * @last: index to start looking from (including)
 *
 * returns the next found (set) bin or a negative value if none is found.
 */
static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
{
	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;

	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
		if (o->registry.aprox_match.vec[i])
			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
				if (BIT_VEC64_TEST_BIT(o->registry.
						       aprox_match.vec,
						       cur_bit)) {
					return cur_bit;
				}
			}
		inner_start = 0;
	}

	/* None found */
	return -1;
}
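/* Example (informative): with last == 70 the scan starts at vector element
 * 70 / 64 == 1 and inner bit 70 % 64 == 6; once that element is exhausted
 * inner_start drops to 0, so every following element is scanned from bit 0.
 */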
/**
 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
 *
 * @o:
 *
 * returns the index of the found bin or -1 if none is found
 */
static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
{
	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);

	if (cur_bit >= 0)
		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);

	return cur_bit;
}
static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	u8 rx_tx_flag = 0;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;

	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;

	return rx_tx_flag;
}
static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					int cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
	u8 func_id = r->func_id;
	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
	int bin;

	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;

	data->rules[idx].cmd_general_data |= rx_tx_add_flag;

	/* Get a bin and update a bins' vector */
	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
		break;

	case BNX2X_MCAST_CMD_DEL:
		/* If there were no more bins to clear
		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
		 * clear any (0xff) bin.
		 * See bnx2x_mcast_validate_e2() for explanation when it may
		 * happen.
		 */
		bin = bnx2x_mcast_clear_first_bin(o);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		bin = cfg_data->bin;
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return;
	}

	DP(BNX2X_MSG_SP, "%s bin %d\n",
	   ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
	    "Setting" : "Clearing"), bin);

	data->rules[idx].bin_id    = (u8)bin;
	data->rules[idx].func_id   = func_id;
	data->rules[idx].engine_id = o->engine_id;
}
/**
 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
 *
 * @bp:        device handle
 * @o:
 * @start_bin: index in the registry to start from (including)
 * @rdata_idx: index in the ramrod data to start from
 *
 * returns last handled bin index or -1 if all bins have been handled
 */
static inline int bnx2x_mcast_handle_restore_cmd_e2(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
	int *rdata_idx)
{
	int cur_bin, cnt = *rdata_idx;
	union bnx2x_mcast_config_data cfg_data = {0};

	/* go through the registry and configure the bins from it */
	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {

		cfg_data.bin = (u8)cur_bin;
		o->set_one_rule(bp, o, cnt, &cfg_data,
				BNX2X_MCAST_CMD_RESTORE);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*rdata_idx = cnt;

	return cur_bin;
}
static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
	int cnt = *line_idx;
	union bnx2x_mcast_config_data cfg_data = {0};

	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
				 link) {

		cfg_data.mac = &pmac_pos->mac[0];
		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   pmac_pos->mac);

		list_del(&pmac_pos->link);

		/* Break if we reached the maximum number
		 * of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* if no more MACs to configure - we are done */
	if (list_empty(&cmd_pos->data.macs_head))
		cmd_pos->done = true;
}
static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	int cnt = *line_idx;

	while (cmd_pos->data.macs_num) {
		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);

		cnt++;

		cmd_pos->data.macs_num--;

		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
		   cmd_pos->data.macs_num, cnt);

		/* Break if we reached the maximum
		 * number of rules.
		 */
		if (cnt >= o->max_cmd_len)
			break;
	}

	*line_idx = cnt;

	/* If we cleared all bins - we are done */
	if (!cmd_pos->data.macs_num)
		cmd_pos->done = true;
}
static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
	int *line_idx)
{
	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
						line_idx);

	if (cmd_pos->data.next_bin < 0)
		/* If o->hdl_restore returned -1 we are done */
		cmd_pos->done = true;
	else
		/* Start from the next bin next time */
		cmd_pos->data.next_bin++;
}
static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
	int cnt = 0;
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
				 link) {
		switch (cmd_pos->type) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_DEL:
			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
							   &cnt);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
			return -EINVAL;
		}

		/* If the command has been completed - remove it from the list
		 * and free the memory
		 */
		if (cmd_pos->done) {
			list_del(&cmd_pos->link);
			kfree(cmd_pos);
		}

		/* Break if we reached the maximum number of rules */
		if (cnt >= o->max_cmd_len)
			break;
	}

	return cnt;
}
static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	int *line_idx)
{
	struct bnx2x_mcast_list_elem *mlist_pos;
	union bnx2x_mcast_config_data cfg_data = {0};
	int cnt = *line_idx;

	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
		cfg_data.mac = mlist_pos->mac;
		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);

		cnt++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   mlist_pos->mac);
	}

	*line_idx = cnt;
}
static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	int *line_idx)
{
	int cnt = *line_idx, i;

	for (i = 0; i < p->mcast_list_len; i++) {
		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);

		cnt++;

		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
				 p->mcast_list_len - i - 1);
	}

	*line_idx = cnt;
}
/**
 * bnx2x_mcast_handle_current_cmd -
 *
 * @bp:        device handle
 * @p:
 * @cmd:
 * @start_cnt: first line in the ramrod data that may be used
 *
 * This function is called iff there is enough place for the current command in
 * the ramrod data.
 * Returns number of lines filled in the ramrod data in total.
 */
static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
			struct bnx2x_mcast_ramrod_params *p, int cmd,
			int start_cnt)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int cnt = start_cnt;

	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);

	switch (cmd) {
	case BNX2X_MCAST_CMD_ADD:
		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_DEL:
		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;
	}

	/* The current command has been handled */
	p->mcast_list_len = 0;

	return cnt;
}
static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   int cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		/* Here we set the approximate amount of work to do, which in
		 * fact may be only less as some MACs in postponed ADD
		 * command(s) scheduled before this command may fall into
		 * the same bin and the actual number of bins set in the
		 * registry would be less than we estimated here. See
		 * bnx2x_mcast_set_one_rule_e2() for further details.
		 */
		p->mcast_list_len = reg_sz;
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Here we assume that all new MACs will fall into new bins.
		 * However we will correct the real registry size after we
		 * handle all pending commands.
		 */
		o->set_registry_size(o, reg_sz + p->mcast_list_len);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* Increase the total number of MACs pending to be configured */
	o->total_pending_num += p->mcast_list_len;

	return 0;
}
static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
				      struct bnx2x_mcast_ramrod_params *p,
				      int old_num_bins)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	o->set_registry_size(o, old_num_bins);
	o->total_pending_num -= p->mcast_list_len;
}
/**
 * bnx2x_mcast_set_rdata_hdr_e2 - sets header values
 *
 * @bp:  device handle
 * @p:
 * @len: number of rules to handle
 */
static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(r->rdata);

	data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
	data->header.rule_cnt = len;
}
/**
 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
 *
 * @bp: device handle
 * @o:
 *
 * Recalculate the actual number of set bins in the registry using Brian
 * Kernighan's algorithm: its execution complexity is as a number of set bins.
 *
 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
 */
static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	int i, cnt = 0;
	u64 elem;

	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
		elem = o->registry.aprox_match.vec[i];
		for (; elem; cnt++)
			elem &= elem - 1;
	}

	o->set_registry_size(o, cnt);

	return 0;
}
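/* The "elem &= elem - 1" step above clears the lowest set bit on every
 * iteration, e.g. 0b101100 -> 0b101000 -> 0b100000 -> 0, so the loop runs
 * once per set bin rather than once per bit of the vector element.
 */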
static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				int cmd)
{
	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct eth_multicast_rules_ramrod_data *data =
		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
	int cnt = 0, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there was enough room in ramrod
	 * data for all pending commands and for the current
	 * command. Otherwise the current command would have been added
	 * to the pending commands and p->mcast_list_len would have been
	 * zeroed.
	 */
	if (p->mcast_list_len > 0)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);

	/* We've pulled out some MACs - update the total number of
	 * outstanding.
	 */
	o->total_pending_num -= cnt;

	/* send a ramrod */
	WARN_ON(o->total_pending_num < 0);
	WARN_ON(cnt > o->max_cmd_len);

	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);

	/* Update a registry size if there are no more pending operations.
	 *
	 * We don't want to change the value of the registry size if there are
	 * pending operations because we want it to always be equal to the
	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
	 * set bins after the last requested operation in order to properly
	 * evaluate the size of the next DEL/RESTORE operation.
	 *
	 * Note that we update the registry itself during command(s) handling
	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
	 * with a limited amount of update commands (per MAC/bin) and we don't
	 * know in this scope what the actual state of bins configuration is
	 * going to be after this ramrod.
	 */
	if (!o->total_pending_num)
		bnx2x_mcast_refresh_registry_e2(bp, o);

	/*
	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/*
		 * No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
				   raw->cid, U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
				    struct bnx2x_mcast_ramrod_params *p,
				    int cmd)
{
	/* Mark that there is a work to do */
	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
		p->mcast_list_len = 1;

	return 0;
}

static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
				       struct bnx2x_mcast_ramrod_params *p,
				       int old_num_bins)
{
	/* Do nothing */
}
#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
do { \
	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
} while (0)

static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
					   struct bnx2x_mcast_obj *o,
					   struct bnx2x_mcast_ramrod_params *p,
					   u32 *mc_filter)
{
	struct bnx2x_mcast_list_elem *mlist_pos;
	int bit;

	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
		   mlist_pos->mac, bit);

		/* bookkeeping... */
		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
				  bit);
	}
}

static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
	u32 *mc_filter)
{
	int bit;

	for (bit = bnx2x_mcast_get_next_bin(o, 0);
	     bit >= 0;
	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
	}
}
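/* Example (informative): for bit == 37 the macro above touches word
 * 37 >> 5 == 1 and sets 1 << (37 & 0x1f) == 1 << 5 in it, i.e.
 * mc_filter[1] |= 0x20.
 */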
/* On 57711 we write the multicast MACs' approximate match
 * table directly into the TSTORM's internal RAM. So we don't
 * really need to handle any tricks to make it work.
 */
static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
				 struct bnx2x_mcast_ramrod_params *p,
				 int cmd)
{
	int i;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* If CLEAR_ONLY has been requested - clear the registry
	 * and clear a pending bit.
	 */
	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		u32 mc_filter[MC_HASH_SIZE] = {0};

		/* Set the multicast filter bits before writing it into
		 * the internal memory.
		 */
		switch (cmd) {
		case BNX2X_MCAST_CMD_ADD:
			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
			break;

		case BNX2X_MCAST_CMD_DEL:
			DP(BNX2X_MSG_SP,
			   "Invalidating multicast MACs configuration\n");

			/* clear the registry */
			memset(o->registry.aprox_match.vec, 0,
			       sizeof(o->registry.aprox_match.vec));
			break;

		case BNX2X_MCAST_CMD_RESTORE:
			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
			break;

		default:
			BNX2X_ERR("Unknown command: %d\n", cmd);
			return -EINVAL;
		}

		/* Set the mcast filter in the internal memory */
		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
	} else
		/* clear the registry */
		memset(o->registry.aprox_match.vec, 0,
		       sizeof(o->registry.aprox_match.vec));

	/* We are done */
	r->clear_pending(r);

	return 0;
}
static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
				   struct bnx2x_mcast_ramrod_params *p,
				   int cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	int reg_sz = o->get_registry_size(o);

	switch (cmd) {
	/* DEL command deletes all currently configured MACs */
	case BNX2X_MCAST_CMD_DEL:
		o->set_registry_size(o, 0);
		/* Don't break */

	/* RESTORE command will restore the entire multicast configuration */
	case BNX2X_MCAST_CMD_RESTORE:
		p->mcast_list_len = reg_sz;
		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
		   cmd, p->mcast_list_len);
		break;

	case BNX2X_MCAST_CMD_ADD:
	case BNX2X_MCAST_CMD_CONT:
		/* Multicast MACs on 57710 are configured as unicast MACs and
		 * there is only a limited number of CAM entries for that
		 * matter.
		 */
		if (p->mcast_list_len > o->max_cmd_len) {
			BNX2X_ERR("Can't configure more than %d multicast MACs "
				  "on 57710\n", o->max_cmd_len);
			return -EINVAL;
		}
		/* Every configured MAC should be cleared if DEL command is
		 * called. Only the last ADD command is relevant as long as
		 * every ADD command overrides the previous configuration.
		 */
		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
		if (p->mcast_list_len > 0)
			o->set_registry_size(o, p->mcast_list_len);

		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd);
		return -EINVAL;

	}

	/* We want to ensure that commands are executed one by one for 57710.
	 * Therefore each non-empty command will consume o->max_cmd_len.
	 */
	if (p->mcast_list_len)
		o->total_pending_num += o->max_cmd_len;

	return 0;
}
static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
				      struct bnx2x_mcast_ramrod_params *p,
				      int old_num_macs)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;

	o->set_registry_size(o, old_num_macs);

	/* If the current command hasn't been handled yet, then being
	 * here means that it's meant to be dropped and we have to
	 * update the number of outstanding MACs accordingly.
	 */
	if (p->mcast_list_len)
		o->total_pending_num -= o->max_cmd_len;
}
static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
					struct bnx2x_mcast_obj *o, int idx,
					union bnx2x_mcast_config_data *cfg_data,
					int cmd)
{
	struct bnx2x_raw_obj *r = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	/* copy mac */
	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
				      &data->config_table[idx].middle_mac_addr,
				      &data->config_table[idx].lsb_mac_addr,
				      cfg_data->mac);

		data->config_table[idx].vlan_id = 0;
		data->config_table[idx].pf_id = r->func_id;
		data->config_table[idx].clients_bit_vector =
			cpu_to_le32(1 << r->cl_id);

		SET_FLAG(data->config_table[idx].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	}
}
/**
 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
 *
 * @bp:  device handle
 * @p:
 * @len: number of rules to handle
 */
static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
					struct bnx2x_mcast_ramrod_params *p,
					u8 len)
{
	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(r->rdata);

	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
		     BNX2X_MAX_MULTICAST*(1 + r->func_id));

	data->hdr.offset = offset;
	data->hdr.client_id = 0xff;
	data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
	data->hdr.length = len;
}
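/* The offset above carves a per-function window in the config table: on
 * ASIC, function N starts at (N + 1) * BNX2X_MAX_MULTICAST entries (e.g.
 * function 2 starts at 3 * BNX2X_MAX_MULTICAST), while emulation uses the
 * smaller BNX2X_MAX_EMUL_MULTI stride.
 */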
/**
 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
 *
 * @bp:        device handle
 * @o:
 * @start_idx: index in the registry to start from
 * @rdata_idx: index in the ramrod data to start from
 *
 * restore command for 57710 is like all other commands - always a stand-alone
 * command - start_idx and rdata_idx will always be 0. This function will
 * always succeed.
 * returns -1 to comply with 57712 variant.
 */
static inline int bnx2x_mcast_handle_restore_cmd_e1(
	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
	int *rdata_idx)
{
	struct bnx2x_mcast_mac_elem *elem;
	int i = 0;
	union bnx2x_mcast_config_data cfg_data = {0};

	/* go through the registry and configure the MACs from it. */
	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
		cfg_data.mac = &elem->mac[0];
		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);

		i++;

		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
		   cfg_data.mac);
	}

	*rdata_idx = i;

	return -1;
}
static inline int bnx2x_mcast_handle_pending_cmds_e1(
	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_pending_mcast_cmd *cmd_pos;
	struct bnx2x_mcast_mac_elem *pmac_pos;
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	union bnx2x_mcast_config_data cfg_data = {0};
	int cnt = 0;

	/* If nothing to be done - return */
	if (list_empty(&o->pending_cmds_head))
		return 0;

	/* Handle the first command */
	cmd_pos = list_first_entry(&o->pending_cmds_head,
				   struct bnx2x_pending_mcast_cmd, link);

	switch (cmd_pos->type) {
	case BNX2X_MCAST_CMD_ADD:
		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
			cfg_data.mac = &pmac_pos->mac[0];
			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);

			cnt++;

			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
			   pmac_pos->mac);
		}
		break;

	case BNX2X_MCAST_CMD_DEL:
		cnt = cmd_pos->data.macs_num;
		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
		break;

	case BNX2X_MCAST_CMD_RESTORE:
		o->hdl_restore(bp, o, 0, &cnt);
		break;

	default:
		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
		return -EINVAL;
	}

	list_del(&cmd_pos->link);
	kfree(cmd_pos);

	return cnt;
}
/**
 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
 *
 * @fw_hi:
 * @fw_mid:
 * @fw_lo:
 * @mac:
 */
static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
					 __le16 *fw_lo, u8 *mac)
{
	mac[1] = ((u8 *)fw_hi)[0];
	mac[0] = ((u8 *)fw_hi)[1];
	mac[3] = ((u8 *)fw_mid)[0];
	mac[2] = ((u8 *)fw_mid)[1];
	mac[5] = ((u8 *)fw_lo)[0];
	mac[4] = ((u8 *)fw_lo)[1];
}
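/* Example (informative): for MAC 00:11:22:33:44:55 the firmware halves hold
 * ((u8 *)fw_hi)[0] == 0x11 and ((u8 *)fw_hi)[1] == 0x00, and so on - each
 * __le16 carries a byte-swapped pair, which is exactly what this helper
 * undoes.
 */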
/**
 * bnx2x_mcast_refresh_registry_e1 -
 *
 * @bp:  device handle
 * @o:
 *
 * Check the ramrod data first entry flag to see if it's a DELETE or ADD
 * command and update the registry correspondingly: if ADD - allocate a memory
 * and add the entries to the registry (list), if DELETE - clear the registry
 * and free the memory.
 */
static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
						  struct bnx2x_mcast_obj *o)
{
	struct bnx2x_raw_obj *raw = &o->raw;
	struct bnx2x_mcast_mac_elem *elem;
	struct mac_configuration_cmd *data =
			(struct mac_configuration_cmd *)(raw->rdata);

	/* If first entry contains a SET bit - the command was ADD,
	 * otherwise - DEL_ALL
	 */
	if (GET_FLAG(data->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
		int i, len = data->hdr.length;

		/* Break if it was a RESTORE command */
		if (!list_empty(&o->registry.exact_match.macs))
			return 0;

		elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
		if (!elem) {
			BNX2X_ERR("Failed to allocate registry memory\n");
			return -ENOMEM;
		}

		for (i = 0; i < len; i++, elem++) {
			bnx2x_get_fw_mac_addr(
				&data->config_table[i].msb_mac_addr,
				&data->config_table[i].middle_mac_addr,
				&data->config_table[i].lsb_mac_addr,
				elem->mac);
			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
			   elem->mac);
			list_add_tail(&elem->link,
				      &o->registry.exact_match.macs);
		}
	} else {
		elem = list_first_entry(&o->registry.exact_match.macs,
					struct bnx2x_mcast_mac_elem, link);
		DP(BNX2X_MSG_SP, "Deleting a registry\n");
		kfree(elem);
		INIT_LIST_HEAD(&o->registry.exact_match.macs);
	}

	return 0;
}
static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
				struct bnx2x_mcast_ramrod_params *p,
				int cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *raw = &o->raw;
	struct mac_configuration_cmd *data =
		(struct mac_configuration_cmd *)(raw->rdata);
	int cnt = 0, i, rc;

	/* Reset the ramrod data buffer */
	memset(data, 0, sizeof(*data));

	/* First set all entries as invalid */
	for (i = 0; i < o->max_cmd_len; i++)
		SET_FLAG(data->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	/* Handle pending commands first */
	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);

	/* If there are no more pending commands - clear SCHEDULED state */
	if (list_empty(&o->pending_cmds_head))
		o->clear_sched(o);

	/* The below may be true iff there were no pending commands */
	if (!cnt)
		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);

	/* For 57710 every command has o->max_cmd_len length to ensure that
	 * commands are done one at a time.
	 */
	o->total_pending_num -= o->max_cmd_len;

	/* send a ramrod */

	WARN_ON(cnt > o->max_cmd_len);

	/* Set ramrod header (in particular, a number of entries to update) */
	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);

	/* update a registry: we need the registry contents to be always up
	 * to date in order to be able to execute a RESTORE opcode. Here
	 * we use the fact that for 57710 we sent one command at a time
	 * hence we may take the registry update out of the command handling
	 * and do it in a simpler way here.
	 */
	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
	if (rc)
		return rc;

	/*
	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
	 * RAMROD_PENDING status immediately.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
		raw->clear_pending(raw);
		return 0;
	} else {
		/*
		 * No need for an explicit memory barrier here as long we would
		 * need to ensure the ordering of writing to the SPQ element
		 * and updating of the SPQ producer which involves a memory
		 * read and we will have to put a full memory barrier there
		 * (inside bnx2x_sp_post()).
		 */

		/* Send a ramrod */
		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
				   U64_HI(raw->rdata_mapping),
				   U64_LO(raw->rdata_mapping),
				   ETH_CONNECTION_TYPE);
		if (rc)
			return rc;

		/* Ramrod completion is pending */
		return 1;
	}
}
static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
{
	return o->registry.exact_match.num_macs_set;
}

static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
{
	return o->registry.aprox_match.num_bins_set;
}

static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.exact_match.num_macs_set = n;
}

static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
						int n)
{
	o->registry.aprox_match.num_bins_set = n;
}
int bnx2x_config_mcast(struct bnx2x *bp,
		       struct bnx2x_mcast_ramrod_params *p,
		       int cmd)
{
	struct bnx2x_mcast_obj *o = p->mcast_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	int rc = 0, old_reg_size;

	/* This is needed to recover the number of currently configured
	 * mcast macs in case of failure.
	 */
	old_reg_size = o->get_registry_size(o);

	/* Do some calculations and checks */
	rc = o->validate(bp, p, cmd);
	if (rc)
		return rc;

	/* Return if there is no work to do */
	if ((!p->mcast_list_len) && (!o->check_sched(o)))
		return 0;

	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
			 "o->max_cmd_len=%d\n", o->total_pending_num,
	   p->mcast_list_len, o->max_cmd_len);

	/* Enqueue the current command to the pending list if we can't complete
	 * it in the current iteration
	 */
	if (r->check_pending(r) ||
	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
		if (rc < 0)
			goto error_exit1;

		/* As long as the current command is in a command list we
		 * don't need to handle it separately.
		 */
		p->mcast_list_len = 0;
	}

	if (!r->check_pending(r)) {

		/* Set 'pending' state */
		r->set_pending(r);

		/* Configure the new classification in the chip */
		rc = o->config_mcast(bp, p, cmd);
		if (rc < 0)
			goto error_exit2;

		/* Wait for a ramrod completion if it was requested */
		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
			rc = o->wait_comp(bp, o);
	}

	return rc;

error_exit2:
	r->clear_pending(r);

error_exit1:
	o->revert(bp, p, old_reg_size);

	return rc;
}
static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	clear_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}

static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
{
	smp_mb__before_clear_bit();
	set_bit(o->sched_state, o->raw.pstate);
	smp_mb__after_clear_bit();
}

static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
{
	return !!test_bit(o->sched_state, o->raw.pstate);
}

static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
{
	return o->raw.check_pending(&o->raw) || o->check_sched(o);
}
void bnx2x_init_mcast_obj(struct bnx2x *bp,
			  struct bnx2x_mcast_obj *mcast_obj,
			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
			  int state, unsigned long *pstate, bnx2x_obj_type type)
{
	memset(mcast_obj, 0, sizeof(*mcast_obj));

	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
			   rdata, rdata_mapping, state, pstate, type);

	mcast_obj->engine_id = engine_id;

	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);

	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
	mcast_obj->check_sched = bnx2x_mcast_check_sched;
	mcast_obj->set_sched = bnx2x_mcast_set_sched;
	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;

	if (CHIP_IS_E1(bp)) {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e1;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;

		if (CHIP_REV_IS_SLOW(bp))
			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
		else
			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;

		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
		mcast_obj->validate          = bnx2x_mcast_validate_e1;
		mcast_obj->revert            = bnx2x_mcast_revert_e1;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_exact;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_exact;

		/* 57710 is the only chip that uses the exact match for mcast
		 * at the moment.
		 */
		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);

	} else if (CHIP_IS_E1H(bp)) {
		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
		mcast_obj->enqueue_cmd   = NULL;
		mcast_obj->hdl_restore   = NULL;
		mcast_obj->check_pending = bnx2x_mcast_check_pending;

		/* 57711 doesn't send a ramrod, so it has unlimited credit
		 * for one command.
		 */
		mcast_obj->max_cmd_len       = -1;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = NULL;
		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	} else {
		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
		mcast_obj->hdl_restore       =
			bnx2x_mcast_handle_restore_cmd_e2;
		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
		/* TODO: There should be a proper HSI define for this number!!!
		 */
		mcast_obj->max_cmd_len       = 16;
		mcast_obj->wait_comp         = bnx2x_mcast_wait;
		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
		mcast_obj->validate          = bnx2x_mcast_validate_e2;
		mcast_obj->revert            = bnx2x_mcast_revert_e2;
		mcast_obj->get_registry_size =
			bnx2x_mcast_get_registry_size_aprox;
		mcast_obj->set_registry_size =
			bnx2x_mcast_set_registry_size_aprox;
	}
}
/*************************** Credit handling **********************************/

/**
 * atomic_add_ifless - add if the result is less than a given value.
 *
 * @v:	pointer of type atomic_t
 * @a:	the amount to add to v...
 * @u:	...if (v + a) is less than u.
 *
 * returns true if (v + a) was less than u, and false otherwise.
 *
 */
static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c + a >= u))
			return false;

		old = atomic_cmpxchg((v), c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}

	return true;
}

/**
 * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
 *
 * @v:	pointer of type atomic_t
 * @a:	the amount to dec from v...
 * @u:	...if (v - a) is more or equal than u.
 *
 * returns true if (v - a) was more or equal than u, and false
 * otherwise.
 */
static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c - a < u))
			return false;

		old = atomic_cmpxchg((v), c, c - a);
		if (likely(old == c))
			break;
		c = old;
	}

	return true;
}
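/* Both helpers above are the classic lock-free read/check/cmpxchg loop:
 * read the counter, bail out if the bound would be violated, then try to
 * publish the new value; if another CPU raced in between, atomic_cmpxchg()
 * returns the fresh value and the loop retries with it.
 */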
static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();
	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
	smp_mb();

	return rc;
}

static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
{
	bool rc;

	smp_mb();

	/* Don't let to refill if credit + cnt > pool_sz */
	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);

	smp_mb();

	return rc;
}

static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
{
	int cur_credit;

	smp_mb();
	cur_credit = atomic_read(&o->credit);

	return cur_credit;
}

static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
					  int cnt)
{
	return true;
}
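/* Example (informative): for a pool with pool_sz == 3 and full credit,
 * get(o, 2) succeeds leaving 1 credit, a second get(o, 2) fails since
 * 1 - 2 < 0, and put(o, 3) would also fail since 1 + 3 > pool_sz.
 */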
static bool bnx2x_credit_pool_get_entry(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	int idx, vec, i;

	*offset = -1;

	/* Find "internal cam-offset" then add to base for this object... */
	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {

		/* Skip the current vector if there are no free entries in it */
		if (!o->pool_mirror[vec])
			continue;

		/* If we've got here we are going to find a free entry */
		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
		      i < BIT_VEC64_ELEM_SZ; idx++, i++)

			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
				/* Got one!! */
				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
				*offset = o->base_pool_offset + idx;
				return true;
			}
	}

	return false;
}

static bool bnx2x_credit_pool_put_entry(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	if (offset < o->base_pool_offset)
		return false;

	offset -= o->base_pool_offset;

	if (offset >= o->pool_sz)
		return false;

	/* Return the entry to the pool */
	BIT_VEC64_SET_BIT(o->pool_mirror, offset);

	return true;
}

static bool bnx2x_credit_pool_put_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int offset)
{
	return true;
}

static bool bnx2x_credit_pool_get_entry_always_true(
	struct bnx2x_credit_pool_obj *o,
	int *offset)
{
	*offset = -1;
	return true;
}
/**
 * bnx2x_init_credit_pool - initialize credit pool internals.
 *
 * @p:
 * @base:	Base entry in the CAM to use.
 * @credit:	pool size.
 *
 * If base is negative no CAM entries handling will be performed.
 * If credit is negative pool operations will always succeed (unlimited pool).
 *
 */
static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
					  int base, int credit)
{
	/* Zero the object first */
	memset(p, 0, sizeof(*p));

	/* Set the table to all 1s */
	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));

	/* Init a pool as full */
	atomic_set(&p->credit, credit);

	/* The total pool size */
	p->pool_sz = credit;

	p->base_pool_offset = base;

	/* Commit the change */
	smp_mb();

	p->check = bnx2x_credit_pool_check;

	/* if pool credit is negative - disable the checks */
	if (credit >= 0) {
		p->put      = bnx2x_credit_pool_put;
		p->get      = bnx2x_credit_pool_get;
		p->put_entry = bnx2x_credit_pool_put_entry;
		p->get_entry = bnx2x_credit_pool_get_entry;
	} else {
		p->put      = bnx2x_credit_pool_always_true;
		p->get      = bnx2x_credit_pool_always_true;
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}

	/* If base is negative - disable entries handling */
	if (base < 0) {
		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
	}
}
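/* The two sign conventions give three useful pool flavours, e.g.:
 *   bnx2x_init_credit_pool(p, -1, cam_sz): credit accounting only, no CAM
 *   entry bookkeeping (57712 and newer MAC pools below);
 *   bnx2x_init_credit_pool(p, 0, -1):      unlimited pool (E1x VLANs below);
 *   bnx2x_init_credit_pool(p, 0, 0):       empty pool that blocks everything.
 */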
void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
				struct bnx2x_credit_pool_obj *p, u8 func_id,
				u8 func_num)
{
/* TODO: this will be defined in consts as well... */
#define BNX2X_CAM_SIZE_EMUL 5

	int cam_sz;

	if (CHIP_IS_E1(bp)) {
		/* In E1, Multicast is saved in cam... */
		if (!CHIP_REV_IS_SLOW(bp))
			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
		else
			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;

		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);

	} else if (CHIP_IS_E1H(bp)) {
		/* CAM credit is equally divided between all active functions
		 * on the PORT!.
		 */
		if ((func_num > 0)) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;
			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}

	} else {

		/*
		 * CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if ((func_num > 0)) {
			if (!CHIP_REV_IS_SLOW(bp))
				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
			else
				cam_sz = BNX2X_CAM_SIZE_EMUL;

			/*
			 * No need for CAM entries handling for 57712 and
			 * newer.
			 */
			bnx2x_init_credit_pool(p, -1, cam_sz);
		} else {
			/* this should never happen! Block MAC operations. */
			bnx2x_init_credit_pool(p, 0, 0);
		}
	}
}
void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
				 struct bnx2x_credit_pool_obj *p,
				 u8 func_id,
				 u8 func_num)
{
	if (CHIP_IS_E1x(bp)) {
		/*
		 * There is no VLAN credit in HW on 57710 and 57711; only
		 * MAC / MAC-VLAN can be set
		 */
		bnx2x_init_credit_pool(p, 0, -1);
	} else {
		/*
		 * CAM credit is equally divided between all active functions
		 * on the PATH.
		 */
		if (func_num > 0) {
			int credit = MAX_VLAN_CREDIT_E2 / func_num;
			bnx2x_init_credit_pool(p, func_id * credit, credit);
		} else
			/* this should never happen! Block VLAN operations. */
			bnx2x_init_credit_pool(p, 0, 0);
	}
}
/****************** RSS Configuration ******************/
/**
 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
 *
 * @bp:		driver handle
 * @p:		pointer to rss configuration
 *
 * Prints it when NETIF_MSG_IFUP debug level is configured.
 */
static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
					struct bnx2x_config_rss_params *p)
{
	int i;

	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
	DP(BNX2X_MSG_SP, "0x0000: ");
	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);

		/* Print 4 bytes in a line */
		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
		    (((i + 1) & 0x3) == 0)) {
			DP_CONT(BNX2X_MSG_SP, "\n");
			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
		}
	}

	DP_CONT(BNX2X_MSG_SP, "\n");
}
/**
 * bnx2x_setup_rss - configure RSS
 *
 * @bp:		device handle
 * @p:		rss configuration
 *
 * sends an UPDATE ramrod for that matter.
 */
static int bnx2x_setup_rss(struct bnx2x *bp,
			   struct bnx2x_config_rss_params *p)
{
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;
	struct eth_rss_update_ramrod_data *data =
		(struct eth_rss_update_ramrod_data *)(r->rdata);
	u8 rss_mode = 0;
	int rc;

	memset(data, 0, sizeof(*data));

	DP(BNX2X_MSG_SP, "Configuring RSS\n");

	/* Set an echo field */
	data->echo = (r->cid & BNX2X_SWCID_MASK) |
		     (r->state << BNX2X_SWCID_SHIFT);

	/* RSS mode */
	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_DISABLED;
	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_REGULAR;
	else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_VLAN_PRI;
	else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_E1HOV_PRI;
	else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
		rss_mode = ETH_RSS_MODE_IP_DSCP;

	data->rss_mode = rss_mode;

	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);

	/* RSS capabilities */
	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;

	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
		data->capabilities |=
			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;

	/* Hashing mask */
	data->rss_result_mask = p->rss_result_mask;

	/* RSS engine ID */
	data->rss_engine_id = o->engine_id;

	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);

	/* Indirection table */
	memcpy(data->indirection_table, p->ind_table,
	       T_ETH_INDIRECTION_TABLE_SIZE);

	/* Remember the last configuration */
	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	/* Print the indirection table */
	if (netif_msg_ifup(bp))
		bnx2x_debug_print_ind_table(bp, p);

	/* RSS keys */
	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
		memcpy(&data->rss_key[0], &p->rss_key[0],
		       sizeof(data->rss_key));
		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
	}

	/*
	 * No need for an explicit memory barrier here as long we would
	 * need to ensure the ordering of writing to the SPQ element
	 * and updating of the SPQ producer which involves a memory
	 * read and we will have to put a full memory barrier there
	 * (inside bnx2x_sp_post()).
	 */

	/* Send a ramrod */
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
			   U64_HI(r->rdata_mapping),
			   U64_LO(r->rdata_mapping),
			   ETH_CONNECTION_TYPE);

	if (rc < 0)
		return rc;

	return 1;
}
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
			     u8 *ind_table)
{
	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
}

int bnx2x_config_rss(struct bnx2x *bp,
		     struct bnx2x_config_rss_params *p)
{
	int rc;
	struct bnx2x_rss_config_obj *o = p->rss_obj;
	struct bnx2x_raw_obj *r = &o->raw;

	/* Do nothing if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
		return 0;

	r->set_pending(r);

	rc = o->config_rss(bp, p);
	if (rc < 0) {
		r->clear_pending(r);
		return rc;
	}

	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
		rc = r->wait_comp(bp, r);

	return rc;
}

void bnx2x_init_rss_config_obj(struct bnx2x *bp,
			       struct bnx2x_rss_config_obj *rss_obj,
			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
			       void *rdata, dma_addr_t rdata_mapping,
			       int state, unsigned long *pstate,
			       bnx2x_obj_type type)
{
	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
			   rdata_mapping, state, pstate, type);

	rss_obj->engine_id  = engine_id;
	rss_obj->config_rss = bnx2x_setup_rss;
}
/********************** Queue state object ***********************************/

/**
 * bnx2x_queue_state_change - perform Queue state change transition
 *
 * @bp:		device handle
 * @params:	parameters to perform the transition
 *
 * returns 0 in case of a successfully completed transition, negative error
 * code in case of failure, positive (EBUSY) value if there is a completion
 * that is still pending (possible only if RAMROD_COMP_WAIT is
 * not set in params->ramrod_flags for asynchronous commands).
 */
int bnx2x_queue_state_change(struct bnx2x *bp,
			     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	int rc, pending_bit;
	unsigned long *pending = &o->pending;

	/* Check that the requested transition is legal */
	if (o->check_transition(bp, o, params))
		return -EINVAL;

	/* Set "pending" bit */
	pending_bit = o->set_pending(o, params);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
		o->complete_cmd(bp, o, pending_bit);
	else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);
		if (rc) {
			o->next_state = BNX2X_Q_STATE_MAX;
			clear_bit(pending_bit, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, pending_bit);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(pending_bit, pending);
}
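/* Example (informative): a caller that leaves RAMROD_COMP_WAIT unset gets
 * back !!test_bit(pending_bit, pending) - normally 1 - and is expected to
 * treat it as "completion still pending" until bnx2x_queue_comp_cmd() below
 * clears the bit from the ramrod completion path.
 */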
static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
				   struct bnx2x_queue_state_params *params)
{
	enum bnx2x_queue_cmd cmd = params->cmd, bit;

	/* ACTIVATE and DEACTIVATE commands are implemented on top of
	 * the UPDATE command.
	 */
	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
		bit = BNX2X_Q_CMD_UPDATE;
	else
		bit = cmd;

	set_bit(bit, &obj->pending);
	return bit;
}

static int bnx2x_queue_wait_comp(struct bnx2x *bp,
				 struct bnx2x_queue_sp_obj *o,
				 enum bnx2x_queue_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}

/**
 * bnx2x_queue_comp_cmd - complete the state change command.
 *
 * @bp:		device handle
 * @o:
 * @cmd:
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				enum bnx2x_queue_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
			  "pending 0x%lx, next_state %d\n", cmd,
			  o->cids[BNX2X_PRIMARY_CID_INDEX],
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	if (o->next_tx_only >= o->max_cos)
		/* >= because tx only must always be smaller than cos since the
		 * primary connection supports COS 0
		 */
		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
			   o->next_tx_only, o->max_cos);

	DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
			 "setting state to %d\n", cmd,
	   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);

	if (o->next_tx_only)  /* print num tx-only if any exist */
		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
		   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);

	o->state = o->next_state;
	o->num_tx_only = o->next_tx_only;
	o->next_state = BNX2X_Q_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct client_init_ramrod_data *data)
{
	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;

	/* Rx data */

	/* IPv6 TPA supported for E2 and above only */
	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
}

static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
				struct bnx2x_queue_sp_obj *o,
				struct bnx2x_general_setup_params *params,
				struct client_init_general_data *gen_data,
				unsigned long *flags)
{
	gen_data->client_id = o->cl_id;

	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
		gen_data->statistics_counter_id =
					params->stat_id;
		gen_data->statistics_en_flg = 1;
		gen_data->statistics_zero_flg =
			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
	} else
		gen_data->statistics_counter_id =
					DISABLE_STATISTIC_COUNTER_ID_VALUE;

	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
	gen_data->sp_client_id = params->spcl_id;
	gen_data->mtu = cpu_to_le16(params->mtu);
	gen_data->func_id = o->func_id;

	gen_data->cos = params->cos;

	gen_data->traffic_type =
		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;

	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
}
static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_txq_setup_params *params,
				struct client_init_tx_data *tx_data,
				unsigned long *flags)
{
	tx_data->enforce_security_flg =
		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
	tx_data->default_vlan =
		cpu_to_le16(params->default_vlan);
	tx_data->default_vlan_flg =
		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
	tx_data->tx_switching_flg =
		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
	tx_data->anti_spoofing_flg =
		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
	tx_data->tx_status_block_id = params->fw_sb_id;
	tx_data->tx_sb_index_number = params->sb_cq_index;
	tx_data->tss_leading_client_id = params->tss_leading_cl_id;

	tx_data->tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	tx_data->tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));

	/* Don't configure any Tx switching mode during queue SETUP */
	tx_data->state = 0;
}
static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
				struct rxq_pause_params *params,
				struct client_init_rx_data *rx_data)
{
	/* flow control data */
	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
}
static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
				struct bnx2x_rxq_setup_params *params,
				struct client_init_rx_data *rx_data,
				unsigned long *flags)
{
	/* Rx data */
	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
	rx_data->vmqueue_mode_en_flg = 0;

	rx_data->cache_line_alignment_log_size =
		params->cache_line_log;
	rx_data->enable_dynamic_hc =
		test_bit(BNX2X_Q_FLG_DHC, flags);
	rx_data->max_sges_for_packet = params->max_sges_pkt;
	rx_data->client_qzone_id = params->cl_qzone_id;
	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);

	/* Always start in DROP_ALL mode */
	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);

	/* We don't set drop flags */
	rx_data->drop_ip_cs_err_flg = 0;
	rx_data->drop_tcp_cs_err_flg = 0;
	rx_data->drop_ttl0_flg = 0;
	rx_data->drop_udp_cs_err_flg = 0;
	rx_data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_VLAN, flags);
	rx_data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_FLG_OV, flags);
	rx_data->status_block_id = params->fw_sb_id;
	rx_data->rx_sb_index_number = params->sb_cq_index;
	rx_data->max_tpa_queues = params->max_tpa_queues;
	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
	rx_data->bd_page_base.lo =
		cpu_to_le32(U64_LO(params->dscr_map));
	rx_data->bd_page_base.hi =
		cpu_to_le32(U64_HI(params->dscr_map));
	rx_data->sge_page_base.lo =
		cpu_to_le32(U64_LO(params->sge_map));
	rx_data->sge_page_base.hi =
		cpu_to_le32(U64_HI(params->sge_map));
	rx_data->cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rcq_map));
	rx_data->cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rcq_map));
	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);

	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
		rx_data->approx_mcast_engine_id = o->func_id;
		rx_data->is_approx_mcast = 1;
	}

	rx_data->rss_engine_id = params->rss_engine_id;

	/* silent vlan removal */
	rx_data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
	rx_data->silent_vlan_value =
		cpu_to_le16(params->silent_removal_value);
	rx_data->silent_vlan_mask =
		cpu_to_le16(params->silent_removal_mask);
}
/* initialize the general, tx and rx parts of a queue object */
static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct client_init_ramrod_data *data)
{
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.setup.gen_params,
				       &data->general,
				       &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.txq_params,
				  &data->tx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
				  &cmd_params->params.setup.rxq_params,
				  &data->rx,
				  &cmd_params->params.setup.flags);

	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
				     &cmd_params->params.setup.pause_params,
				     &data->rx);
}
/* initialize the general and tx parts of a tx-only queue object */
static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
				struct bnx2x_queue_state_params *cmd_params,
				struct tx_queue_init_ramrod_data *data)
{
	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
				       &cmd_params->params.tx_only.gen_params,
				       &data->general,
				       &cmd_params->params.tx_only.flags);

	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
				  &cmd_params->params.tx_only.txq_params,
				  &data->tx,
				  &cmd_params->params.tx_only.flags);

	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
	   cmd_params->q_obj->cids[0],
	   data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
}
/**
 * bnx2x_q_init - init HW/FW queue
 *
 * @bp:		device handle
 * @params:
 *
 * HW/FW initial Queue configuration:
 *      - HC: Rx and Tx
 *      - CDU context validation
 */
static inline int bnx2x_q_init(struct bnx2x *bp,
			       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct bnx2x_queue_init_params *init = &params->params.init;
	u16 hc_usec;
	u8 cos;

	/* Tx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
			init->tx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
			hc_usec);
	}

	/* Rx HC configuration */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;

		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
			init->rx.sb_cq_index,
			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
			hc_usec);
	}

	/* Set CDU context validation values */
	for (cos = 0; cos < o->max_cos; cos++) {
		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
		   o->cids[cos], cos);
		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
	}

	/* As no ramrod is sent, complete the command immediately */
	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);

	mmiowb();
	smp_mb();

	return 0;
}
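/*
 * Illustrative sketch, not a call site taken from this driver: INIT (and the
 * other commands below) are normally requested through the generic
 * bnx2x_queue_state_change() entry point rather than by calling
 * bnx2x_q_init() directly. The fastpath pointer "fp" and its q_obj member
 * are assumptions for the example; params.init must be filled by the caller.
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &fp->q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_INIT;
 *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */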
static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);

	/* No need for an explicit memory barrier here: the ordering of the
	 * write to the SPQ element and the update of the SPQ producer is
	 * enforced by the full memory barrier inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_init_ramrod_data *rdata =
		(struct client_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
	bnx2x_q_fill_setup_data_e2(bp, params, rdata);

	/* No need for an explicit memory barrier here: the ordering of the
	 * write to the SPQ element and the update of the SPQ producer is
	 * enforced by the full memory barrier inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
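/*
 * Note: the e2 flow differs from the e1x one above only in the extra
 * bnx2x_q_fill_setup_data_e2() pass, which fills the e2-specific fields
 * of the same client_init_ramrod_data block before it is posted.
 */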
static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct tx_queue_init_ramrod_data *rdata =
		(struct tx_queue_init_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
		&params->params.tx_only;
	u8 cid_index = tx_only_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
	   tx_only_params->gen_params.cos,
	   tx_only_params->gen_params.spcl_id);

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_setup_tx_only(bp, params, rdata);

	DP(BNX2X_MSG_SP,
	   "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
	   o->cids[cid_index], rdata->general.client_id,
	   rdata->general.sp_client_id, rdata->general.cos);

	/* No need for an explicit memory barrier here: the ordering of the
	 * write to the SPQ element and the update of the SPQ producer is
	 * enforced by the full memory barrier inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
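/*
 * Illustrative sketch (hypothetical call site): requesting an additional
 * tx-only connection on an already set-up queue. cid_index selects which
 * of the object's CIDs the ramrod refers to and must be below max_cos,
 * as validated above.
 *
 *	struct bnx2x_queue_state_params q_params = {NULL};
 *
 *	q_params.q_obj = &fp->q_obj;
 *	q_params.cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
 *	q_params.params.tx_only.cid_index = 1;
 *	rc = bnx2x_queue_state_change(bp, &q_params);
 */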
static void bnx2x_q_fill_update_data(struct bnx2x *bp,
				     struct bnx2x_queue_sp_obj *obj,
				     struct bnx2x_queue_update_params *params,
				     struct client_update_ramrod_data *data)
{
	/* Client ID of the client to update */
	data->client_id = obj->cl_id;

	/* Function ID of the client to update */
	data->func_id = obj->func_id;

	/* Default VLAN value */
	data->default_vlan = cpu_to_le16(params->def_vlan);

	/* Inner VLAN stripping */
	data->inner_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
	data->inner_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Outer VLAN stripping */
	data->outer_vlan_removal_enable_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
	data->outer_vlan_removal_change_flg =
		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
			 &params->update_flags);

	/* Drop packets that have source MAC that doesn't belong to this
	 * Queue.
	 */
	data->anti_spoofing_enable_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
	data->anti_spoofing_change_flg =
		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);

	/* Activate/Deactivate */
	data->activate_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
	data->activate_change_flg =
		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);

	/* Enable default VLAN */
	data->default_vlan_enable_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
	data->default_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			 &params->update_flags);

	/* silent vlan removal */
	data->silent_vlan_change_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			 &params->update_flags);
	data->silent_vlan_removal_flg =
		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
}
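/*
 * Note on the pairs above: every "*_flg" value travels with a matching
 * "*_change_flg". The change flag tells the FW whether the corresponding
 * value should be applied at all, so a single UPDATE ramrod can modify
 * one attribute while leaving all the others untouched.
 */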
static inline int bnx2x_q_send_update(struct bnx2x *bp,
				      struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	struct client_update_ramrod_data *rdata =
		(struct client_update_ramrod_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_queue_update_params *update_params =
		&params->params.update;
	u8 cid_index = update_params->cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	/* Clear the ramrod data */
	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data */
	bnx2x_q_fill_update_data(bp, o, update_params, rdata);

	/* No need for an explicit memory barrier here: the ordering of the
	 * write to the SPQ element and the update of the SPQ producer is
	 * enforced by the full memory barrier inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
			     o->cids[cid_index], U64_HI(data_mapping),
			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
}
/**
 * bnx2x_q_send_deactivate - send DEACTIVATE command
 *
 * @bp:		device handle
 * @params:
 *
 * implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}
/**
 * bnx2x_q_send_activate - send ACTIVATE command
 *
 * @bp:		device handle
 * @params:
 *
 * implemented using the UPDATE command.
 */
static inline int bnx2x_q_send_activate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_update_params *update = &params->params.update;

	memset(update, 0, sizeof(*update));

	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);

	return bnx2x_q_send_update(bp, params);
}
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	/* TODO: Not implemented yet. */
	return -1;
}
static inline int bnx2x_q_send_halt(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
			     ETH_CONNECTION_TYPE);
}
static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
				       struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_idx = params->params.cfc_del.cid_index;

	if (cid_idx >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_idx);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
}
static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;
	u8 cid_index = params->params.terminate.cid_index;

	if (cid_index >= o->max_cos) {
		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
			  o->cl_id, cid_index);
		return -EINVAL;
	}

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
}
static inline int bnx2x_q_send_empty(struct bnx2x *bp,
				     struct bnx2x_queue_state_params *params)
{
	struct bnx2x_queue_sp_obj *o = params->q_obj;

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
			     ETH_CONNECTION_TYPE);
}
static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_INIT:
		return bnx2x_q_init(bp, params);
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
		return bnx2x_q_send_setup_tx_only(bp, params);
	case BNX2X_Q_CMD_DEACTIVATE:
		return bnx2x_q_send_deactivate(bp, params);
	case BNX2X_Q_CMD_ACTIVATE:
		return bnx2x_q_send_activate(bp, params);
	case BNX2X_Q_CMD_UPDATE:
		return bnx2x_q_send_update(bp, params);
	case BNX2X_Q_CMD_UPDATE_TPA:
		return bnx2x_q_send_update_tpa(bp, params);
	case BNX2X_Q_CMD_HALT:
		return bnx2x_q_send_halt(bp, params);
	case BNX2X_Q_CMD_CFC_DEL:
		return bnx2x_q_send_cfc_del(bp, params);
	case BNX2X_Q_CMD_TERMINATE:
		return bnx2x_q_send_terminate(bp, params);
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_q_send_empty(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
				    struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e1x(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
				   struct bnx2x_queue_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_Q_CMD_SETUP:
		return bnx2x_q_send_setup_e2(bp, params);
	case BNX2X_Q_CMD_INIT:
	case BNX2X_Q_CMD_SETUP_TX_ONLY:
	case BNX2X_Q_CMD_DEACTIVATE:
	case BNX2X_Q_CMD_ACTIVATE:
	case BNX2X_Q_CMD_UPDATE:
	case BNX2X_Q_CMD_UPDATE_TPA:
	case BNX2X_Q_CMD_HALT:
	case BNX2X_Q_CMD_CFC_DEL:
	case BNX2X_Q_CMD_TERMINATE:
	case BNX2X_Q_CMD_EMPTY:
		return bnx2x_queue_send_cmd_cmn(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
/**
 * bnx2x_queue_chk_transition - check state machine of a regular Queue
 *
 * @bp:		device handle
 * @o:
 * @params:
 *
 * (not Forwarding)
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_queue_chk_transition(struct bnx2x *bp,
				      struct bnx2x_queue_sp_obj *o,
				      struct bnx2x_queue_state_params *params)
{
	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
	enum bnx2x_queue_cmd cmd = params->cmd;
	struct bnx2x_queue_update_params *update_params =
		&params->params.update;
	u8 next_tx_only = o->num_tx_only;

	/*
	 * Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_Q_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;

	switch (state) {
	case BNX2X_Q_STATE_RESET:
		if (cmd == BNX2X_Q_CMD_INIT)
			next_state = BNX2X_Q_STATE_INITIALIZED;

		break;
	case BNX2X_Q_STATE_INITIALIZED:
		if (cmd == BNX2X_Q_CMD_SETUP) {
			if (test_bit(BNX2X_Q_FLG_ACTIVE,
				     &params->params.setup.flags))
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_ACTIVE:
		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = 1;
		}

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_ACTIVE;
		}

		break;
	case BNX2X_Q_STATE_MULTI_COS:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;

		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
			next_state = BNX2X_Q_STATE_MULTI_COS;
			next_tx_only = o->num_tx_only + 1;
		}

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_MULTI_COS;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				      &update_params->update_flags))
				next_state = BNX2X_Q_STATE_INACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_MCOS_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
			next_tx_only = o->num_tx_only - 1;
			if (next_tx_only == 0)
				next_state = BNX2X_Q_STATE_ACTIVE;
			else
				next_state = BNX2X_Q_STATE_MULTI_COS;
		}

		break;
	case BNX2X_Q_STATE_INACTIVE:
		if (cmd == BNX2X_Q_CMD_ACTIVATE)
			next_state = BNX2X_Q_STATE_ACTIVE;

		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
			next_state = BNX2X_Q_STATE_INACTIVE;

		else if (cmd == BNX2X_Q_CMD_HALT)
			next_state = BNX2X_Q_STATE_STOPPED;

		else if (cmd == BNX2X_Q_CMD_UPDATE) {
			/* If "active" state change is requested, update the
			 * state accordingly.
			 */
			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
				     &update_params->update_flags) &&
			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
				     &update_params->update_flags)) {
				if (o->num_tx_only == 0)
					next_state = BNX2X_Q_STATE_ACTIVE;
				else /* tx only queues exist for this queue */
					next_state = BNX2X_Q_STATE_MULTI_COS;
			} else
				next_state = BNX2X_Q_STATE_INACTIVE;
		}

		break;
	case BNX2X_Q_STATE_STOPPED:
		if (cmd == BNX2X_Q_CMD_TERMINATE)
			next_state = BNX2X_Q_STATE_TERMINATED;

		break;
	case BNX2X_Q_STATE_TERMINATED:
		if (cmd == BNX2X_Q_CMD_CFC_DEL)
			next_state = BNX2X_Q_STATE_RESET;

		break;
	default:
		BNX2X_ERR("Illegal state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_Q_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		o->next_tx_only = next_tx_only;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);

	return -EINVAL;
}
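/*
 * Summary of the regular Queue state machine implemented above:
 *
 *	RESET           --INIT-->           INITIALIZED
 *	INITIALIZED     --SETUP-->          ACTIVE or INACTIVE
 *	ACTIVE          --DEACTIVATE-->     INACTIVE
 *	ACTIVE          --HALT-->           STOPPED
 *	ACTIVE          --SETUP_TX_ONLY-->  MULTI_COS
 *	MULTI_COS       --TERMINATE-->      MCOS_TERMINATED
 *	MCOS_TERMINATED --CFC_DEL-->        ACTIVE or MULTI_COS
 *	INACTIVE        --ACTIVATE-->       ACTIVE
 *	INACTIVE        --HALT-->           STOPPED
 *	STOPPED         --TERMINATE-->      TERMINATED
 *	TERMINATED      --CFC_DEL-->        RESET
 *
 * EMPTY, UPDATE_TPA and (unless an "active" change is requested) UPDATE
 * keep the queue in its current state.
 */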
void bnx2x_init_queue_obj(struct bnx2x *bp,
			  struct bnx2x_queue_sp_obj *obj,
			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
			  void *rdata,
			  dma_addr_t rdata_mapping, unsigned long type)
{
	memset(obj, 0, sizeof(*obj));

	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);

	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
	obj->max_cos = cid_cnt;
	obj->cl_id = cl_id;
	obj->func_id = func_id;
	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;
	obj->type = type;
	obj->next_state = BNX2X_Q_STATE_MAX;

	if (CHIP_IS_E1x(bp))
		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
	else
		obj->send_cmd = bnx2x_queue_send_cmd_e2;

	obj->check_transition = bnx2x_queue_chk_transition;

	obj->complete_cmd = bnx2x_queue_comp_cmd;
	obj->wait_comp = bnx2x_queue_wait_comp;
	obj->set_pending = bnx2x_queue_set_pending;
}
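/*
 * Illustrative sketch (names are hypothetical): setting up a queue object
 * for a single-CoS L2 queue backed by one CID. Note that the type bits are
 * bit numbers tested with test_bit() above, so they must be set with
 * __set_bit() rather than OR-ed together as masks.
 *
 *	unsigned long q_type = 0;
 *	u32 cids[1] = { HW_CID(bp, fp->cid) };
 *
 *	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
 *	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
 *	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, 1, BP_FUNC(bp),
 *			     rdata, rdata_mapping, q_type);
 */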
void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
			     struct bnx2x_queue_sp_obj *obj,
			     u32 cid, u8 index)
{
	obj->cids[index] = cid;
}
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
					   struct bnx2x_func_sp_obj *o)
{
	/* in the middle of transaction - return INVALID state */
	if (o->pending)
		return BNX2X_F_STATE_MAX;

	/*
	 * Ensure the order of reading of o->pending and o->state:
	 * o->pending should be read first.
	 */
	rmb();

	return o->state;
}
static int bnx2x_func_wait_comp(struct bnx2x *bp,
				struct bnx2x_func_sp_obj *o,
				enum bnx2x_func_cmd cmd)
{
	return bnx2x_state_wait(bp, cmd, &o->pending);
}
/**
 * bnx2x_func_state_change_comp - complete the state machine transition
 *
 * @bp:		device handle
 * @o:
 * @cmd:
 *
 * Called on state change transition. Completes the state
 * machine transition only - no HW interaction.
 */
static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
					       struct bnx2x_func_sp_obj *o,
					       enum bnx2x_func_cmd cmd)
{
	unsigned long cur_pending = o->pending;

	if (!test_and_clear_bit(cmd, &cur_pending)) {
		BNX2X_ERR("Bad MC reply %d for func %d in state %d "
			  "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
			  o->state, cur_pending, o->next_state);
		return -EINVAL;
	}

	DP(BNX2X_MSG_SP,
	   "Completing command %d for func %d, setting state to %d\n",
	   cmd, BP_FUNC(bp), o->next_state);

	o->state = o->next_state;
	o->next_state = BNX2X_F_STATE_MAX;

	/* It's important that o->state and o->next_state are
	 * updated before o->pending.
	 */
	wmb();

	clear_bit(cmd, &o->pending);
	smp_mb__after_clear_bit();

	return 0;
}
/**
 * bnx2x_func_comp_cmd - complete the state change command
 *
 * @bp:		device handle
 * @o:
 * @cmd:
 *
 * Checks that the arrived completion is expected.
 */
static int bnx2x_func_comp_cmd(struct bnx2x *bp,
			       struct bnx2x_func_sp_obj *o,
			       enum bnx2x_func_cmd cmd)
{
	/* Complete the state machine part first, check if it's a
	 * legal completion.
	 */
	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
	return rc;
}
/**
 * bnx2x_func_chk_transition - perform function state machine transition
 *
 * @bp:		device handle
 * @o:
 * @params:
 *
 * It both checks if the requested command is legal in a current
 * state and, if it's legal, sets a `next_state' in the object
 * that will be used in the completion flow to set the `state'
 * of the object.
 *
 * returns 0 if a requested command is a legal transition,
 *         -EINVAL otherwise.
 */
static int bnx2x_func_chk_transition(struct bnx2x *bp,
				     struct bnx2x_func_sp_obj *o,
				     struct bnx2x_func_state_params *params)
{
	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
	enum bnx2x_func_cmd cmd = params->cmd;

	/*
	 * Forget all pending for completion commands if a driver only state
	 * transition has been requested.
	 */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		o->pending = 0;
		o->next_state = BNX2X_F_STATE_MAX;
	}

	/*
	 * Don't allow a next state transition if we are in the middle of
	 * the previous one.
	 */
	if (o->pending)
		return -EBUSY;

	switch (state) {
	case BNX2X_F_STATE_RESET:
		if (cmd == BNX2X_F_CMD_HW_INIT)
			next_state = BNX2X_F_STATE_INITIALIZED;

		break;
	case BNX2X_F_STATE_INITIALIZED:
		if (cmd == BNX2X_F_CMD_START)
			next_state = BNX2X_F_STATE_STARTED;

		else if (cmd == BNX2X_F_CMD_HW_RESET)
			next_state = BNX2X_F_STATE_RESET;

		break;
	case BNX2X_F_STATE_STARTED:
		if (cmd == BNX2X_F_CMD_STOP)
			next_state = BNX2X_F_STATE_INITIALIZED;
		else if (cmd == BNX2X_F_CMD_TX_STOP)
			next_state = BNX2X_F_STATE_TX_STOPPED;

		break;
	case BNX2X_F_STATE_TX_STOPPED:
		if (cmd == BNX2X_F_CMD_TX_START)
			next_state = BNX2X_F_STATE_STARTED;

		break;
	default:
		BNX2X_ERR("Unknown state: %d\n", state);
	}

	/* Transition is assured */
	if (next_state != BNX2X_F_STATE_MAX) {
		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
		   state, cmd, next_state);
		o->next_state = next_state;
		return 0;
	}

	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
	   state, cmd);

	return -EINVAL;
}
/**
 * bnx2x_func_init_func - performs HW init at function stage
 *
 * @bp:		device handle
 * @drv:
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
 * HW blocks.
 */
static inline int bnx2x_func_init_func(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	return drv->init_hw_func(bp);
}
/**
 * bnx2x_func_init_port - performs HW init at port stage
 *
 * @bp:		device handle
 * @drv:
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
 * FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_port(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_port(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_func(bp, drv);
}
/**
 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
 *
 * @bp:		device handle
 * @drv:
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
				       const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn_chip(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}
/**
 * bnx2x_func_init_cmn - performs HW init at common stage
 *
 * @bp:		device handle
 * @drv:
 *
 * Init HW when the current phase is
 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
 * PORT-only and FUNCTION-only HW blocks.
 */
static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
				      const struct bnx2x_func_sp_drv_ops *drv)
{
	int rc = drv->init_hw_cmn(bp);
	if (rc)
		return rc;

	return bnx2x_func_init_port(bp, drv);
}
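/*
 * Note: the init helpers above deliberately cascade from the widest scope
 * to the narrowest one: both cmn_chip and cmn fall through to port, and
 * port falls through to func. A COMMON load phase therefore initializes
 * the PORT-only and FUNCTION-only HW blocks as well.
 */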
static int bnx2x_func_hw_init(struct bnx2x *bp,
			      struct bnx2x_func_state_params *params)
{
	u32 load_code = params->params.hw_init.load_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
	int rc = 0;

	DP(BNX2X_MSG_SP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* Prepare buffers for unzipping the FW */
	rc = drv->gunzip_init(bp);
	if (rc)
		return rc;

	/* Prepare FW */
	rc = drv->init_fw(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		goto fw_init_err;
	}

	/* Handle the beginning of COMMON_XXX phases separately... */
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_func_init_cmn_chip(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_func_init_cmn(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_func_init_port(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_func_init_func(bp, drv);
		if (rc)
			goto init_hw_err;

		break;
	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		rc = -EINVAL;
	}

init_hw_err:
	drv->release_fw(bp);

fw_init_err:
	drv->gunzip_end(bp);

	/* In case of success, complete the command immediately: no ramrods
	 * have been sent.
	 */
	if (!rc)
		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);

	return rc;
}
/**
 * bnx2x_func_reset_func - reset HW at function stage
 *
 * @bp:		device handle
 * @drv:
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
 * FUNCTION-only HW blocks.
 */
static inline void bnx2x_func_reset_func(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_func(bp);
}
/**
 * bnx2x_func_reset_port - reset HW at port stage
 *
 * @bp:		device handle
 * @drv:
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
 * FUNCTION-only and PORT-only HW blocks.
 *
 *                 !!!IMPORTANT!!!
 *
 * It's important to call reset_port before reset_func() as the last thing
 * reset_func does is pf_disable() thus disabling PGLUE_B, which
 * makes any DMAE transactions impossible.
 */
static inline void bnx2x_func_reset_port(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	drv->reset_hw_port(bp);
	bnx2x_func_reset_func(bp, drv);
}
/**
 * bnx2x_func_reset_cmn - reset HW at common stage
 *
 * @bp:		device handle
 * @drv:
 *
 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
 */
static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
					const struct bnx2x_func_sp_drv_ops *drv)
{
	bnx2x_func_reset_port(bp, drv);
	drv->reset_hw_cmn(bp);
}
static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
				      struct bnx2x_func_state_params *params)
{
	u32 reset_phase = params->params.hw_reset.reset_phase;
	struct bnx2x_func_sp_obj *o = params->f_obj;
	const struct bnx2x_func_sp_drv_ops *drv = o->drv;

	DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
	   reset_phase);

	switch (reset_phase) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_func_reset_cmn(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_func_reset_port(bp, drv);
		break;
	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_func_reset_func(bp, drv);
		break;
	default:
		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
			  reset_phase);
		break;
	}

	/* Complete the command immediately: no ramrods have been sent. */
	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);

	return 0;
}
static inline int bnx2x_func_send_start(struct bnx2x *bp,
					struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct function_start_data *rdata =
		(struct function_start_data *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_start_params *start_params = &params->params.start;

	memset(rdata, 0, sizeof(*rdata));

	/* Fill the ramrod data with provided parameters */
	rdata->function_mode = cpu_to_le16(start_params->mf_mode);
	rdata->sd_vlan_tag = start_params->sd_vlan_tag;
	rdata->path_id = BP_PATH(bp);
	rdata->network_cos_mode = start_params->network_cos_mode;

	/* No need for an explicit memory barrier here: the ordering of the
	 * write to the SPQ element and the update of the SPQ producer is
	 * enforced by the full memory barrier inside bnx2x_sp_post().
	 */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
					  struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}
static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
					   struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct flow_control_configuration *rdata =
		(struct flow_control_configuration *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_tx_start_params *tx_start_params =
		&params->params.tx_start;
	int i;

	memset(rdata, 0, sizeof(*rdata));

	rdata->dcb_enabled = tx_start_params->dcb_enabled;
	rdata->dcb_version = tx_start_params->dcb_version;
	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;

	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
		rdata->traffic_type_to_priority_cos[i] =
			tx_start_params->traffic_type_to_priority_cos[i];

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
static int bnx2x_func_send_cmd(struct bnx2x *bp,
			       struct bnx2x_func_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_F_CMD_HW_INIT:
		return bnx2x_func_hw_init(bp, params);
	case BNX2X_F_CMD_START:
		return bnx2x_func_send_start(bp, params);
	case BNX2X_F_CMD_STOP:
		return bnx2x_func_send_stop(bp, params);
	case BNX2X_F_CMD_HW_RESET:
		return bnx2x_func_hw_reset(bp, params);
	case BNX2X_F_CMD_TX_STOP:
		return bnx2x_func_send_tx_stop(bp, params);
	case BNX2X_F_CMD_TX_START:
		return bnx2x_func_send_tx_start(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}
void bnx2x_init_func_obj(struct bnx2x *bp,
			 struct bnx2x_func_sp_obj *obj,
			 void *rdata, dma_addr_t rdata_mapping,
			 struct bnx2x_func_sp_drv_ops *drv_iface)
{
	memset(obj, 0, sizeof(*obj));

	mutex_init(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;

	obj->send_cmd = bnx2x_func_send_cmd;
	obj->check_transition = bnx2x_func_chk_transition;
	obj->complete_cmd = bnx2x_func_comp_cmd;
	obj->wait_comp = bnx2x_func_wait_comp;

	obj->drv = drv_iface;
}
/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp:		device handle
 * @params:	parameters to perform the transaction
 *
 * returns 0 in case of successfully completed transition,
 *         negative error code in case of failure, positive
 *         (EBUSY) value if there is a pending completion for the
 *         requested command (possible only if RAMROD_COMP_WAIT is
 *         not set in params->ramrod_flags for asynchronous
 *         commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	if (o->check_transition(bp, o, params)) {
		mutex_unlock(&o->one_pending_mutex);
		return -EINVAL;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(cmd, pending);
}
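/*
 * Illustrative sketch (hypothetical call site): synchronously starting the
 * function. With RAMROD_COMP_WAIT set, the call returns only after the
 * FUNCTION_START ramrod completion has been processed; "bp->func_obj" and
 * "bp->mf_mode" follow the usual bnx2x naming and are assumptions here.
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_START;
 *	func_params.params.start.mf_mode = bp->mf_mode;
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	rc = bnx2x_func_state_change(bp, &func_params);
 */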