/* bnx2x_sriov.c: QLogic Everest network driver.
 *
 * Copyright 2009-2013 Broadcom Corporation
 * Copyright 2014 QLogic Corporation
 * All rights reserved
 *
 * Unless you and QLogic execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2, available
 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other QLogic software provided under a
 * license other than the GPL, without QLogic's express prior written
 * consent.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Shmulik Ravid
 *	       Ariel Elior <ariel.elior@qlogic.com>
 */
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_cmn.h"
#include "bnx2x_sp.h"
#include <linux/crc32.h>
#include <linux/if_vlan.h>

static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
			    struct bnx2x_virtf **vf,
			    struct pf_vf_bulletin_content **bulletin,
			    bool test_queue);

/* General service functions */
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

int bnx2x_vf_idx_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	int idx;

	for_each_vf(bp, idx)
		if (bnx2x_vf(bp, idx, abs_vfid) == abs_vfid)
			break;
	return idx;
}

static
struct bnx2x_virtf *bnx2x_vf_by_abs_fid(struct bnx2x *bp, u16 abs_vfid)
{
	u16 idx = (u16)bnx2x_vf_idx_by_abs_fid(bp, abs_vfid);
	return (idx < BNX2X_NR_VIRTFN(bp)) ? BP_VF(bp, idx) : NULL;
}
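
/* Note: when no VF matches, for_each_vf() leaves idx == BNX2X_NR_VIRTFN(bp),
 * which is exactly what the range check above relies on to return NULL.
 */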

static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
				u8 igu_sb_id, u8 segment, u16 index, u8 op,
				u8 update)
{
	/* acking a VF sb through the PF - use the GRC */
	u32 ctl;
	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
	u32 func_encode = vf->abs_vfid;
	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + igu_sb_id;
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
	      func_encode << IGU_CTRL_REG_FID_SHIFT |
	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr_data);
	REG_WR(bp, igu_addr_data, cmd_data.sb_id_and_flags);
	mmiowb();
	barrier();

	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
	   ctl, igu_addr_ctl);
	REG_WR(bp, igu_addr_ctl, ctl);
	mmiowb();
	barrier();
}
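
/* Note: no pretend is needed for the ack itself - the VF's function id is
 * carried in the IGU command word (IGU_CTRL_REG_FID), so the write lands on
 * the VF's status block even though it is issued by the PF over the GRC.
 */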

static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
				      struct bnx2x_virtf *vf,
				      bool print_err)
{
	if (!bnx2x_leading_vfq(vf, sp_initialized)) {
		if (print_err)
			BNX2X_ERR("Slowpath objects not yet initialized!\n");
		else
			DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
		return false;
	}
	return true;
}

/* VFOP operations states */
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	DP(BNX2X_MSG_IOV,
	   "VF[%d] Q_SETUP: txq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, flags=0x%lx, traffic-type=%d",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->tx.sb_cq_index,
	   init_params->tx.hc_rate,
	   setup_params->flags,
	   setup_params->txq_params.traffic_type);
}

void bnx2x_vfop_qctor_dump_rx(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      struct bnx2x_queue_init_params *init_params,
			      struct bnx2x_queue_setup_params *setup_params,
			      u16 q_idx, u16 sb_idx)
{
	struct bnx2x_rxq_setup_params *rxq_params = &setup_params->rxq_params;

	DP(BNX2X_MSG_IOV, "VF[%d] Q_SETUP: rxq[%d]-- vfsb=%d, sb-index=%d, hc-rate=%d, mtu=%d, buf-size=%d\n"
	   "sge-size=%d, max_sge_pkt=%d, tpa-agg-size=%d, flags=0x%lx, drop-flags=0x%x, cache-log=%d\n",
	   vf->abs_vfid,
	   q_idx,
	   sb_idx,
	   init_params->rx.sb_cq_index,
	   init_params->rx.hc_rate,
	   setup_params->gen_params.mtu,
	   rxq_params->buf_sz,
	   rxq_params->sge_buf_sz,
	   rxq_params->max_sges_pkt,
	   rxq_params->tpa_agg_sz,
	   setup_params->flags,
	   rxq_params->drop_flags,
	   rxq_params->cache_line_log);
}

void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
			   struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q,
			   struct bnx2x_vf_queue_construct_params *p,
			   unsigned long q_type)
{
	struct bnx2x_queue_init_params *init_p = &p->qstate.params.init;
	struct bnx2x_queue_setup_params *setup_p = &p->prep_qsetup;

	/* INIT */

	/* Enable host coalescing in the transition to INIT state */
	if (test_bit(BNX2X_Q_FLG_HC, &init_p->rx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->rx.flags);

	if (test_bit(BNX2X_Q_FLG_HC, &init_p->tx.flags))
		__set_bit(BNX2X_Q_FLG_HC_EN, &init_p->tx.flags);

	/* FW SB ID */
	init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);

	/* context */
	init_p->cxts[0] = q->cxt;

	/* SETUP */

	/* Setup-op general parameters */
	setup_p->gen_params.spcl_id = vf->sp_cl_id;
	setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
	setup_p->gen_params.fp_hsi = vf->fp_hsi;

	/* Setup-op flags:
	 * collect statistics, zero statistics, local-switching, security,
	 * OV for Flex10, RSS and MCAST for leading
	 */
	if (test_bit(BNX2X_Q_FLG_STATS, &setup_p->flags))
		__set_bit(BNX2X_Q_FLG_ZERO_STATS, &setup_p->flags);

	/* for VFs, enable tx switching, bd coherency, and mac address
	 * anti-spoofing
	 */
	__set_bit(BNX2X_Q_FLG_TX_SWITCH, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
	__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);

	/* Setup-op rx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
		struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;

		rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
		rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
		rxq_p->rss_engine_id = FW_VF_HANDLE(vf->abs_vfid);

		if (test_bit(BNX2X_Q_FLG_TPA, &setup_p->flags))
			rxq_p->max_tpa_queues = BNX2X_VF_MAX_TPA_AGG_QUEUES;
	}

	/* Setup-op tx parameters */
	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &q_type)) {
		setup_p->txq_params.tss_leading_cl_id = vf->leading_rss;
		setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
	}
}

static int bnx2x_vf_queue_create(struct bnx2x *bp,
				 struct bnx2x_virtf *vf, int qid,
				 struct bnx2x_vf_queue_construct_params *qctor)
{
	struct bnx2x_queue_state_params *q_params;
	int rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Prepare ramrod information */
	q_params = &qctor->qstate;
	q_params->q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params->ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params->q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		DP(BNX2X_MSG_IOV, "queue was already up. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'construction' ramrods */
	q_params->cmd = BNX2X_Q_CMD_INIT;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	memcpy(&q_params->params.setup, &qctor->prep_qsetup,
	       sizeof(struct bnx2x_queue_setup_params));
	q_params->cmd = BNX2X_Q_CMD_SETUP;
	rc = bnx2x_queue_state_change(bp, q_params);
	if (rc)
		goto out;

	/* enable interrupts */
	bnx2x_vf_igu_ack_sb(bp, vf, vf_igu_sb(vf, bnx2x_vfq(vf, qid, sb_idx)),
			    USTORM_ID, 0, IGU_INT_ENABLE, 0);
out:
	return rc;
}

static int bnx2x_vf_queue_destroy(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  int qid)
{
	enum bnx2x_queue_cmd cmds[] = {BNX2X_Q_CMD_HALT,
				       BNX2X_Q_CMD_TERMINATE,
				       BNX2X_Q_CMD_CFC_DEL};
	struct bnx2x_queue_state_params q_params;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare ramrod information */
	memset(&q_params, 0, sizeof(struct bnx2x_queue_state_params));
	q_params.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
	set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	if (bnx2x_get_q_logical_state(bp, q_params.q_obj) ==
	    BNX2X_Q_LOGICAL_STATE_STOPPED) {
		DP(BNX2X_MSG_IOV, "queue was already stopped. Aborting gracefully\n");
		goto out;
	}

	/* Run Queue 'destruction' ramrods */
	for (i = 0; i < ARRAY_SIZE(cmds); i++) {
		q_params.cmd = cmds[i];
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to run Queue command %d\n", cmds[i]);
			return rc;
		}
	}
out:
	/* Clean Context */
	if (bnx2x_vfq(vf, qid, cxt)) {
		bnx2x_vfq(vf, qid, cxt)->ustorm_ag_context.cdu_usage = 0;
		bnx2x_vfq(vf, qid, cxt)->xstorm_ag_context.cdu_reserved = 0;
	}

	return 0;
}
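
/* Note: the HALT -> TERMINATE -> CFC_DEL sequence above walks the queue
 * state machine through its full teardown path; each ramrod is completed
 * synchronously because RAMROD_COMP_WAIT is set.
 */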

static void
bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
	if (vf) {
		/* the first igu entry belonging to VFs of this PF */
		if (!BP_VFDB(bp)->first_vf_igu_entry)
			BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;

		/* the first igu entry belonging to this VF */
		if (!vf_sb_count(vf))
			vf->igu_base_id = igu_sb_id;

		++vf_sb_count(vf);
		++vf->sb_count;
	}
	BP_VFDB(bp)->vf_sbs_pool++;
}

static inline void bnx2x_vf_vlan_credit(struct bnx2x *bp,
					struct bnx2x_vlan_mac_obj *obj,
					atomic_t *counter)
{
	struct list_head *pos;
	int read_lock;
	int cnt = 0;

	read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
	if (read_lock)
		DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");

	list_for_each(pos, &obj->head)
		cnt++;

	if (!read_lock)
		bnx2x_vlan_mac_h_read_unlock(bp, obj);

	atomic_set(counter, cnt);
}

static int bnx2x_vf_vlan_mac_clear(struct bnx2x *bp, struct bnx2x_virtf *vf,
				   int qid, bool drv_only, int type)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - deleting all %s\n", vf->abs_vfid,
	   (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
	   (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (type == BNX2X_VF_FILTER_VLAN_MAC) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
	} else if (type == BNX2X_VF_FILTER_MAC) {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
	} else {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
	}
	ramrod.user_req.cmd = BNX2X_VLAN_MAC_DEL;

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Start deleting */
	rc = ramrod.vlan_mac_obj->delete_all(bp,
					     ramrod.vlan_mac_obj,
					     &ramrod.user_req.vlan_mac_flags,
					     &ramrod.ramrod_flags);
	if (rc) {
		BNX2X_ERR("Failed to delete all %s\n",
			  (type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MACs" :
			  (type == BNX2X_VF_FILTER_MAC) ? "MACs" : "VLANs");
		return rc;
	}

	return 0;
}

static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
				    struct bnx2x_virtf *vf, int qid,
				    struct bnx2x_vf_mac_vlan_filter *filter,
				    bool drv_only)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod;
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d] - %s a %s filter\n",
	   vf->abs_vfid, filter->add ? "Adding" : "Deleting",
	   (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ? "VLAN-MAC" :
	   (filter->type == BNX2X_VF_FILTER_MAC) ? "MAC" : "VLAN");

	/* Prepare ramrod params */
	memset(&ramrod, 0, sizeof(struct bnx2x_vlan_mac_ramrod_params));
	if (filter->type == BNX2X_VF_FILTER_VLAN_MAC) {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_mac_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
	} else if (filter->type == BNX2X_VF_FILTER_VLAN) {
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
		ramrod.user_req.u.vlan.vlan = filter->vid;
	} else {
		set_bit(BNX2X_ETH_MAC, &ramrod.user_req.vlan_mac_flags);
		ramrod.vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
		memcpy(&ramrod.user_req.u.mac.mac, filter->mac, ETH_ALEN);
	}
	ramrod.user_req.cmd = filter->add ? BNX2X_VLAN_MAC_ADD :
					    BNX2X_VLAN_MAC_DEL;

	set_bit(RAMROD_EXEC, &ramrod.ramrod_flags);
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &ramrod.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);

	/* Add/Remove the filter */
	rc = bnx2x_config_vlan_mac(bp, &ramrod);
	if (rc == -EEXIST)
		return 0;
	if (rc) {
		BNX2X_ERR("Failed to %s %s\n",
			  filter->add ? "add" : "delete",
			  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
			  "VLAN-MAC" :
			  (filter->type == BNX2X_VF_FILTER_MAC) ?
			  "MAC" : "VLAN");
		return rc;
	}

	filter->applied = true;

	return 0;
}

int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
				  struct bnx2x_vf_mac_vlan_filters *filters,
				  int qid, bool drv_only)
{
	int rc = 0, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* Prepare ramrod params */
	for (i = 0; i < filters->count; i++) {
		rc = bnx2x_vf_mac_vlan_config(bp, vf, qid,
					      &filters->filters[i], drv_only);
		if (rc)
			break;
	}

	/* Rollback if needed */
	if (i != filters->count) {
		BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
			  i, filters->count);
		while (--i >= 0) {
			if (!filters->filters[i].applied)
				continue;
			filters->filters[i].add = !filters->filters[i].add;
			bnx2x_vf_mac_vlan_config(bp, vf, qid,
						 &filters->filters[i],
						 drv_only);
		}
	}

	/* It's our responsibility to free the filters */
	kfree(filters);

	return rc;
}
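
/* Note: the rollback above simply flips the 'add' flag on every filter that
 * was actually applied and replays it, turning each completed add into a
 * delete (and vice versa); filters that failed before being applied are
 * skipped.
 */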

int bnx2x_vf_queue_setup(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid,
			 struct bnx2x_vf_queue_construct_params *qctor)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	rc = bnx2x_vf_queue_create(bp, vf, qid, qctor);
	if (rc)
		goto op_err;

	/* Schedule the configuration of any pending vlan filters */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
			       BNX2X_MSG_IOV);
	return 0;
op_err:
	BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

static int bnx2x_vf_queue_flr(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* If needed, clean the filtering data base */
	if ((qid == LEADING_IDX) &&
	    bnx2x_validate_vf_sp_objs(bp, vf, false)) {
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_VLAN_MAC);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_VLAN);
		if (rc)
			goto op_err;
		rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid, true,
					     BNX2X_VF_FILTER_MAC);
		if (rc)
			goto op_err;
	}

	/* Terminate queue */
	if (bnx2x_vfq(vf, qid, sp_obj).state != BNX2X_Q_STATE_RESET) {
		struct bnx2x_queue_state_params qstate;

		memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.q_obj->state = BNX2X_Q_STATE_STOPPED;
		qstate.cmd = BNX2X_Q_CMD_TERMINATE;
		set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc)
			goto op_err;
	}

	return 0;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n", vf->abs_vfid, qid, rc);
	return rc;
}

int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
		   bnx2x_mac_addr_t *mcasts, int mc_num, bool drv_only)
{
	struct bnx2x_mcast_list_elem *mc = NULL;
	struct bnx2x_mcast_ramrod_params mcast;
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Prepare Multicast command */
	memset(&mcast, 0, sizeof(struct bnx2x_mcast_ramrod_params));
	mcast.mcast_obj = &vf->mcast_obj;
	if (drv_only)
		set_bit(RAMROD_DRV_CLR_ONLY, &mcast.ramrod_flags);
	else
		set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
	if (mc_num) {
		mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
			     GFP_KERNEL);
		if (!mc) {
			BNX2X_ERR("Cannot configure multicasts due to lack of memory\n");
			return -ENOMEM;
		}
	}

	if (mc_num) {
		INIT_LIST_HEAD(&mcast.mcast_list);
		for (i = 0; i < mc_num; i++) {
			mc[i].mac = mcasts[i];
			list_add_tail(&mc[i].link,
				      &mcast.mcast_list);
		}

		/* add new mcasts */
		mcast.mcast_list_len = mc_num;
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_SET);
		if (rc)
			BNX2X_ERR("Failed to set multicasts\n");
	} else {
		/* clear existing mcasts */
		rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
		if (rc)
			BNX2X_ERR("Failed to remove multicasts\n");
	}

	kfree(mc);

	return rc;
}

static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
				  struct bnx2x_rx_mode_ramrod_params *ramrod,
				  struct bnx2x_virtf *vf,
				  unsigned long accept_flags)
{
	struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);

	memset(ramrod, 0, sizeof(*ramrod));
	ramrod->cid = vfq->cid;
	ramrod->cl_id = vfq_cl_id(vf, vfq);
	ramrod->rx_mode_obj = &bp->rx_mode_obj;
	ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
	ramrod->rx_accept_flags = accept_flags;
	ramrod->tx_accept_flags = accept_flags;
	ramrod->pstate = &vf->filter_state;
	ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;

	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	set_bit(RAMROD_RX, &ramrod->ramrod_flags);
	set_bit(RAMROD_TX, &ramrod->ramrod_flags);

	ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
	ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
}

int bnx2x_vf_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf,
		    int qid, unsigned long accept_flags)
{
	struct bnx2x_rx_mode_ramrod_params ramrod;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	bnx2x_vf_prep_rx_mode(bp, qid, &ramrod, vf, accept_flags);
	set_bit(RAMROD_COMP_WAIT, &ramrod.ramrod_flags);
	vfq_get(vf, qid)->accept_flags = ramrod.rx_accept_flags;
	return bnx2x_config_rx_mode(bp, &ramrod);
}

int bnx2x_vf_queue_teardown(struct bnx2x *bp, struct bnx2x_virtf *vf, int qid)
{
	int rc;

	DP(BNX2X_MSG_IOV, "vf[%d:%d]\n", vf->abs_vfid, qid);

	/* Remove all classification configuration for leading queue */
	if (qid == LEADING_IDX) {
		rc = bnx2x_vf_rxmode(bp, vf, qid, 0);
		if (rc)
			goto op_err;

		/* Remove filtering if feasible */
		if (bnx2x_validate_vf_sp_objs(bp, vf, true)) {
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_VLAN_MAC);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_VLAN);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_vlan_mac_clear(bp, vf, qid,
						     false,
						     BNX2X_VF_FILTER_MAC);
			if (rc)
				goto op_err;
			rc = bnx2x_vf_mcast(bp, vf, NULL, 0, false);
			if (rc)
				goto op_err;
		}
	}

	/* Destroy queue */
	rc = bnx2x_vf_queue_destroy(bp, vf, qid);
	if (rc)
		goto op_err;
	return rc;
op_err:
	BNX2X_ERR("vf[%d:%d] error: rc %d\n",
		  vf->abs_vfid, qid, rc);
	return rc;
}

/* VF enable primitives
 * when pretend is required the caller is responsible
 * for calling pretend prior to calling these routines
 */

/* internal vf enable - until vf is enabled internally all transactions
 * are blocked. This routine should always be called last with pretend.
 */
static void bnx2x_vf_enable_internal(struct bnx2x *bp, u8 enable)
{
	REG_WR(bp, PGLUE_B_REG_INTERNAL_VFID_ENABLE, enable ? 1 : 0);
}

/* clears vf error in all semi blocks */
static void bnx2x_vf_semi_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, USEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, abs_vfid);
	REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, abs_vfid);
}

static void bnx2x_vf_pglue_clear_err(struct bnx2x *bp, u8 abs_vfid)
{
	u32 was_err_group = (2 * BP_PATH(bp) + abs_vfid) >> 5;
	u32 was_err_reg = 0;

	switch (was_err_group) {
	case 0:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR;
		break;
	case 1:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR;
		break;
	case 2:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR;
		break;
	case 3:
		was_err_reg = PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR;
		break;
	}
	REG_WR(bp, was_err_reg, 1 << (abs_vfid & 0x1f));
}
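
/* Worked example (illustrative): on path 1 with abs_vfid 40, was_err_group
 * is (2 * 1 + 40) >> 5 == 1, selecting the 63_32 register, and the bit
 * written is 1 << (40 & 0x1f) == 1 << 8.
 */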

static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;
	u32 val;

	/* Set VF masks and configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));

	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
	REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
	REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);

	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val |= (IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN);
	val &= ~IGU_VF_CONF_PARENT_MASK;
	val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);

	DP(BNX2X_MSG_IOV,
	   "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
	   vf->abs_vfid, val);

	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf_sb_count(vf); i++) {
		u8 igu_sb_id = vf_igu_sb(vf, i);

		/* zero prod memory */
		REG_WR(bp, IGU_REG_PROD_CONS_MEMORY + igu_sb_id * 4, 0);

		/* clear sb state machine */
		bnx2x_igu_clear_sb_gen(bp, vf->abs_vfid, igu_sb_id,
				       false /* VF */);

		/* disable + update */
		bnx2x_vf_igu_ack_sb(bp, vf, igu_sb_id, USTORM_ID, 0,
				    IGU_INT_DISABLE, 1);
	}
}

void bnx2x_vf_enable_access(struct bnx2x *bp, u8 abs_vfid)
{
	/* set the VF-PF association in the FW */
	storm_memset_vf_to_pf(bp, FW_VF_HANDLE(abs_vfid), BP_FUNC(bp));
	storm_memset_func_en(bp, FW_VF_HANDLE(abs_vfid), 1);

	/* clear vf errors */
	bnx2x_vf_semi_clear_err(bp, abs_vfid);
	bnx2x_vf_pglue_clear_err(bp, abs_vfid);

	/* internal vf-enable - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
	DP(BNX2X_MSG_IOV, "enabling internal access for vf %x\n", abs_vfid);
	bnx2x_vf_enable_internal(bp, true);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static void bnx2x_vf_enable_traffic(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	bnx2x_vf_igu_reset(bp, vf);

	/* pretend to enable the vf with the PBF */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	REG_WR(bp, PBF_REG_DISABLE_VF, 0);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
{
	struct pci_dev *dev;
	struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf)
		return false;

	dev = pci_get_bus_and_slot(vf->bus, vf->devfn);
	if (dev)
		return bnx2x_is_pcie_pending(dev);
	return false;
}

int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
{
	/* Verify no pending pci transactions */
	if (bnx2x_vf_is_pcie_pending(bp, abs_vfid))
		BNX2X_ERR("PCIE Transactions still pending\n");

	return 0;
}

/* must be called after the number of PF queues and the number of VFs are
 * both known
 */
static void
bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	struct vf_pf_resc_request *resc = &vf->alloc_resc;

	/* will be set only during VF-ACQUIRE */
	resc->num_rxqs = 0;
	resc->num_txqs = 0;

	resc->num_mac_filters = VF_MAC_CREDIT_CNT;
	resc->num_vlan_filters = VF_VLAN_CREDIT_CNT;

	/* no real limitation */
	resc->num_mc_filters = 0;

	/* num_sbs already set */
	resc->num_sbs = vf->sb_count;
}

/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	/* reset the state variables */
	bnx2x_iov_static_resc(bp, vf);
	vf->state = VF_FREE;
}

static void bnx2x_vf_flr_clnup_hw(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	/* DQ usage counter */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	bnx2x_flr_clnup_poll_hw_counter(bp, DORQ_REG_VF_USAGE_CNT,
					"DQ VF usage counter timed out",
					poll_cnt);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* FW cleanup command - poll for the results */
	if (bnx2x_send_final_clnup(bp, (u8)FW_VF_HANDLE(vf->abs_vfid),
				   poll_cnt))
		BNX2X_ERR("VF[%d] Final cleanup timed-out\n", vf->abs_vfid);

	/* verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);
}

static void bnx2x_vf_flr(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* the cleanup operations are valid if and only if the VF
	 * was first acquired.
	 */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_flr(bp, vf, i);
		if (rc)
			goto out;
	}

	/* remove multicasts */
	bnx2x_vf_mcast(bp, vf, NULL, 0, true);

	/* dispatch final cleanup and wait for HW queues to flush */
	bnx2x_vf_flr_clnup_hw(bp, vf);

	/* release VF resources */
	bnx2x_vf_free_resc(bp, vf);

	vf->malicious = false;

	/* re-open the mailbox */
	bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	return;
out:
	BNX2X_ERR("vf[%d:%d] failed flr: rc %d\n",
		  vf->abs_vfid, i, rc);
}

static void bnx2x_vf_flr_clnup(struct bnx2x *bp)
{
	struct bnx2x_virtf *vf;
	int i;

	for (i = 0; i < BNX2X_NR_VIRTFN(bp); i++) {
		/* VF should be RESET & in FLR cleanup states */
		if (bnx2x_vf(bp, i, state) != VF_RESET ||
		    !bnx2x_vf(bp, i, flr_clnup_stage))
			continue;

		DP(BNX2X_MSG_IOV, "next vf to cleanup: %d. Num of vfs: %d\n",
		   i, BNX2X_NR_VIRTFN(bp));

		vf = BP_VF(bp, i);

		/* lock the vf pf channel */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);

		/* invoke the VF FLR SM */
		bnx2x_vf_flr(bp, vf);

		/* mark the VF to be ACKED and continue */
		vf->flr_clnup_stage = false;
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_FLR);
	}

	/* Acknowledge the handled VFs.
	 * We acknowledge all the VFs for which an FLR was requested, even
	 * those we never opened, since otherwise the MCP will interrupt us
	 * immediately again when we only ack some of the bits, resulting
	 * in an endless loop. This can happen for example in KVM, where an
	 * 'all ones' FLR request is sometimes given by the hypervisor.
	 */
	DP(BNX2X_MSG_MCP, "DRV_STATUS_VF_DISABLED ACK for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i],
			  bp->vfdb->flrd_vfs[i]);

	bnx2x_fw_command(bp, DRV_MSG_CODE_VF_DISABLED_DONE, 0);

	/* clear the acked bits - better yet if the MCP implemented
	 * write to clear semantics
	 */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		SHMEM2_WR(bp, drv_ack_vf_disabled[BP_FW_MB_IDX(bp)][i], 0);
}

void bnx2x_vf_handle_flr_event(struct bnx2x *bp)
{
	int i;

	/* Read FLR'd VFs */
	for (i = 0; i < FLRD_VFS_DWORDS; i++)
		bp->vfdb->flrd_vfs[i] = SHMEM2_RD(bp, mcp_vf_disabled[i]);

	DP(BNX2X_MSG_MCP,
	   "DRV_STATUS_VF_DISABLED received for vfs 0x%x 0x%x\n",
	   bp->vfdb->flrd_vfs[0], bp->vfdb->flrd_vfs[1]);

	for_each_vf(bp, i) {
		struct bnx2x_virtf *vf = BP_VF(bp, i);
		u32 reset = 0;

		if (vf->abs_vfid < 32)
			reset = bp->vfdb->flrd_vfs[0] & (1 << vf->abs_vfid);
		else
			reset = bp->vfdb->flrd_vfs[1] &
				(1 << (vf->abs_vfid - 32));

		if (reset) {
			/* set as reset and ready for cleanup */
			vf->state = VF_RESET;
			vf->flr_clnup_stage = true;

			DP(BNX2X_MSG_IOV,
			   "Initiating Final cleanup for VF %d\n",
			   vf->abs_vfid);
		}
	}

	/* do the FLR cleanup for all marked VFs */
	bnx2x_vf_flr_clnup(bp);
}
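
/* Note: flrd_vfs[] holds one bit per VF across two 32-bit dwords, i.e. up to
 * 64 VFs per path (assuming FLRD_VFS_DWORDS == BNX2X_MAX_NUM_OF_VFS / 32 == 2),
 * which is why the loop above splits on abs_vfid < 32.
 */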

/* IOV global initialization routines */
void bnx2x_iov_init_dq(struct bnx2x *bp)
{
	if (!IS_SRIOV(bp))
		return;

	/* Set the DQ such that the CID reflects the abs_vfid */
	REG_WR(bp, DORQ_REG_VF_NORM_VF_BASE, 0);
	REG_WR(bp, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS));

	/* Set VFs starting CID. If it's > 0, the preceding CIDs belong to
	 * the PF L2 queues
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID);

	/* The VF window size is the log2 of the max number of CIDs per VF */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND);

	/* The VF doorbell size (0 - 8B, 4 - 128B) is set here to match the
	 * PF doorbell size, although the two are independent.
	 */
	REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);

	/* No security checks for now -
	 * configure single rule (out of 16) mask = 0x1, value = 0x0,
	 * CID range 0 - 0x1ffff
	 */
	REG_WR(bp, DORQ_REG_VF_TYPE_MASK_0, 1);
	REG_WR(bp, DORQ_REG_VF_TYPE_VALUE_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
	REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);

	/* set the VF doorbell threshold. This threshold represents the amount
	 * of doorbells allowed in the main DORQ fifo for a specific VF.
	 */
	REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
}
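
/* Note: with the DQ programmed as above, each VF owns a contiguous window of
 * 1 << BNX2X_VF_CID_WND CIDs above DORQ_REG_VF_NORM_CID_BASE, so the vfid and
 * the VF-local queue index can later be recovered from a cid by simple
 * shift/mask arithmetic (see bnx2x_iov_eq_sp_event()).
 */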

void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
		REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}

static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return dev->bus->number + ((dev->devfn + iov->offset +
				    iov->stride * vfid) >> 8);
}

static int bnx2x_vf_devfn(struct bnx2x *bp, int vfid)
{
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	return (dev->devfn + iov->offset + iov->stride * vfid) & 0xff;
}
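
/* Note: the two helpers above implement the PCIe SR-IOV routing-ID formula -
 * VF RID = PF RID + First VF Offset + VF Stride * vfid - where the high bits
 * of the 16-bit RID select the bus and the low 8 bits the devfn.
 */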

static void bnx2x_vf_set_bars(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i, n;
	struct pci_dev *dev = bp->pdev;
	struct bnx2x_sriov *iov = &bp->vfdb->sriov;

	for (i = 0, n = 0; i < PCI_SRIOV_NUM_BARS; i += 2, n++) {
		u64 start = pci_resource_start(dev, PCI_IOV_RESOURCES + i);
		u32 size = pci_resource_len(dev, PCI_IOV_RESOURCES + i);

		size /= iov->total;
		vf->bars[n].bar = start + size * vf->abs_vfid;
		vf->bars[n].size = size;
	}
}
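
/* Note: each (64-bit, hence the i += 2 stride) SR-IOV BAR of the PF is an
 * array of equal slices, one per VF; dividing the resource length by
 * iov->total and indexing by abs_vfid yields the slice owned by this VF.
 */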

static int bnx2x_ari_enabled(struct pci_dev *dev)
{
	return dev->bus->self && dev->bus->self->ari_enabled;
}

static int
bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
	int sb_id;
	u32 val;
	u8 fid, current_pf = 0;

	/* IGU in normal mode - read CAM */
	for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
		if (fid & IGU_FID_ENCODE_IS_PF)
			current_pf = fid & IGU_FID_PF_NUM_MASK;
		else if (current_pf == BP_FUNC(bp))
			bnx2x_vf_set_igu_info(bp, sb_id,
					      (fid & IGU_FID_VF_NUM_MASK));
		DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
		   ((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
		   ((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
		   (fid & IGU_FID_VF_NUM_MASK)), sb_id,
		   GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
	}
	DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
	return BP_VFDB(bp)->vf_sbs_pool;
}

static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
{
	if (bp->vfdb) {
		kfree(bp->vfdb->vfqs);
		kfree(bp->vfdb->vfs);
		kfree(bp->vfdb);
	}
	bp->vfdb = NULL;
}

static int bnx2x_sriov_pci_cfg_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	int pos;
	struct pci_dev *dev = bp->pdev;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		BNX2X_ERR("failed to find SRIOV capability in device\n");
		return -ENODEV;
	}

	iov->pos = pos;
	DP(BNX2X_MSG_IOV, "sriov ext pos %d\n", pos);
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &iov->total);
	pci_read_config_word(dev, pos + PCI_SRIOV_INITIAL_VF, &iov->initial);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	return 0;
}

static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
{
	u32 val;

	/* read the SRIOV capability structure
	 * The fields can be read via configuration read or
	 * directly from the device (starting at offset PCICFG_OFFSET)
	 */
	if (bnx2x_sriov_pci_cfg_info(bp, iov))
		return -ENODEV;

	/* get the number of SRIOV bars */
	iov->nres = 0;

	/* read the first_vfid */
	val = REG_RD(bp, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF);
	iov->first_vf_in_pf = ((val & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK)
			       * 8) - (BNX2X_MAX_NUM_OF_VFS * BP_PATH(bp));

	DP(BNX2X_MSG_IOV,
	   "IOV info[%d]: first vf %d, nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
	   BP_FUNC(bp),
	   iov->first_vf_in_pf, iov->nres, iov->cap, iov->ctrl, iov->total,
	   iov->initial, iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);

	return 0;
}

/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
		       int num_vfs_param)
{
	int err, i;
	struct bnx2x_sriov *iov;
	struct pci_dev *dev = bp->pdev;

	bp->vfdb = NULL;

	/* verify this is a PF */
	if (IS_VF(bp))
		return 0;

	/* verify sriov capability is present in configuration space */
	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV))
		return 0;

	/* verify chip revision */
	if (CHIP_IS_E1x(bp))
		return 0;

	/* check if SRIOV support is turned off */
	if (!num_vfs_param)
		return 0;

	/* SRIOV assumes that num of PF CIDs < BNX2X_FIRST_VF_CID */
	if (BNX2X_L2_MAX_CID(bp) >= BNX2X_FIRST_VF_CID) {
		BNX2X_ERR("PF cids %d are overspilling into vf space (starts at %d). Abort SRIOV\n",
			  BNX2X_L2_MAX_CID(bp), BNX2X_FIRST_VF_CID);
		return 0;
	}

	/* SRIOV can be enabled only with MSIX */
	if (int_mode_param == BNX2X_INT_MODE_MSI ||
	    int_mode_param == BNX2X_INT_MODE_INTX) {
		BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
		return 0;
	}

	err = -EIO;
	/* verify ari is enabled */
	if (!bnx2x_ari_enabled(bp->pdev)) {
		BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
		return 0;
	}

	/* verify igu is in normal mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
		return 0;
	}

	/* allocate the vfs database */
	bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
	if (!bp->vfdb) {
		BNX2X_ERR("failed to allocate vf database\n");
		err = -ENOMEM;
		goto failed;
	}

	/* get the sriov info - Linux already collected all the pertinent
	 * information, however the sriov structure is for the private use
	 * of the pci module. Also we want this information regardless
	 * of the hypervisor.
	 */
	iov = &(bp->vfdb->sriov);
	err = bnx2x_sriov_info(bp, iov);
	if (err)
		goto failed;

	/* SR-IOV capability was enabled but there are no VFs */
	if (iov->total == 0)
		goto failed;

	iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);

	DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
	   num_vfs_param, iov->nr_virtfn);

	/* allocate the vf array */
	bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
				BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
	if (!bp->vfdb->vfs) {
		BNX2X_ERR("failed to allocate vf array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Initial VF init - index and abs_vfid - nr_virtfn must be set */
	for_each_vf(bp, i) {
		bnx2x_vf(bp, i, index) = i;
		bnx2x_vf(bp, i, abs_vfid) = iov->first_vf_in_pf + i;
		bnx2x_vf(bp, i, state) = VF_FREE;
		mutex_init(&bnx2x_vf(bp, i, op_mutex));
		bnx2x_vf(bp, i, op_current) = CHANNEL_TLV_NONE;
	}

	/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
	if (!bnx2x_get_vf_igu_cam_info(bp)) {
		BNX2X_ERR("No entries in IGU CAM for vfs\n");
		err = -EINVAL;
		goto failed;
	}

	/* allocate the queue arrays for all VFs */
	bp->vfdb->vfqs = kzalloc(
		BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
		GFP_KERNEL);

	if (!bp->vfdb->vfqs) {
		BNX2X_ERR("failed to allocate vf queue array\n");
		err = -ENOMEM;
		goto failed;
	}

	/* Prepare the VFs event synchronization mechanism */
	mutex_init(&bp->vfdb->event_mutex);

	mutex_init(&bp->vfdb->bulletin_mutex);

	if (SHMEM2_HAS(bp, sriov_switch_mode))
		SHMEM2_WR(bp, sriov_switch_mode, SRIOV_SWITCH_MODE_VEB);

	return 0;
failed:
	DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
	__bnx2x_iov_free_vfdb(bp);
	return err;
}

void bnx2x_iov_remove_one(struct bnx2x *bp)
{
	int vf_idx;

	/* if SRIOV is not enabled there's nothing to do */
	if (!IS_SRIOV(bp))
		return;

	bnx2x_disable_sriov(bp);

	/* disable access to all VFs */
	for (vf_idx = 0; vf_idx < bp->vfdb->sriov.total; vf_idx++) {
		bnx2x_pretend_func(bp,
				   HW_VF_HANDLE(bp,
						bp->vfdb->sriov.first_vf_in_pf +
						vf_idx));
		DP(BNX2X_MSG_IOV, "disabling internal access for vf %d\n",
		   bp->vfdb->sriov.first_vf_in_pf + vf_idx);
		bnx2x_vf_enable_internal(bp, 0);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
	}

	/* free vf database */
	__bnx2x_iov_free_vfdb(bp);
}

void bnx2x_iov_free_mem(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return;

	/* free vfs hw contexts */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = &bp->vfdb->context[i];
		BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size);
	}

	BNX2X_PCI_FREE(BP_VFDB(bp)->sp_dma.addr,
		       BP_VFDB(bp)->sp_dma.mapping,
		       BP_VFDB(bp)->sp_dma.size);

	BNX2X_PCI_FREE(BP_VF_MBX_DMA(bp)->addr,
		       BP_VF_MBX_DMA(bp)->mapping,
		       BP_VF_MBX_DMA(bp)->size);

	BNX2X_PCI_FREE(BP_VF_BULLETIN_DMA(bp)->addr,
		       BP_VF_BULLETIN_DMA(bp)->mapping,
		       BP_VF_BULLETIN_DMA(bp)->size);
}

int bnx2x_iov_alloc_mem(struct bnx2x *bp)
{
	size_t tot_size;
	int i, rc = 0;

	if (!IS_SRIOV(bp))
		return rc;

	/* allocate vfs hw contexts */
	tot_size = (BP_VFDB(bp)->sriov.first_vf_in_pf + BNX2X_NR_VIRTFN(bp)) *
		BNX2X_CIDS_PER_VF * sizeof(union cdu_context);

	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *cxt = BP_VF_CXT_PAGE(bp, i);
		cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);

		if (cxt->size) {
			cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
			if (!cxt->addr)
				goto alloc_mem_err;
		} else {
			cxt->addr = NULL;
			cxt->mapping = 0;
		}
		tot_size -= cxt->size;
	}

	/* allocate vfs ramrods dma memory - client_init and set_mac */
	tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
	BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
						   tot_size);
	if (!BP_VFDB(bp)->sp_dma.addr)
		goto alloc_mem_err;
	BP_VFDB(bp)->sp_dma.size = tot_size;

	/* allocate mailboxes */
	tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
	BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
						  tot_size);
	if (!BP_VF_MBX_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_MBX_DMA(bp)->size = tot_size;

	/* allocate local bulletin boards */
	tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
	BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
						       tot_size);
	if (!BP_VF_BULLETIN_DMA(bp)->addr)
		goto alloc_mem_err;

	BP_VF_BULLETIN_DMA(bp)->size = tot_size;

	return 0;

alloc_mem_err:
	return -ENOMEM;
}

static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
			   struct bnx2x_vf_queue *q)
{
	u8 cl_id = vfq_cl_id(vf, q);
	u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
	unsigned long q_type = 0;

	set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
	set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);

	/* Queue State object */
	bnx2x_init_queue_obj(bp, &q->sp_obj,
			     cl_id, &q->cid, 1, func_id,
			     bnx2x_vf_sp(bp, vf, q_data),
			     bnx2x_vf_sp_map(bp, vf, q_data),
			     q_type);

	/* sp indication is set only when vlan/mac/etc. are initialized */
	q->sp_initialized = false;

	DP(BNX2X_MSG_IOV,
	   "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
	   vf->abs_vfid, q->sp_obj.func_id, q->cid);
}

static int bnx2x_max_speed_cap(struct bnx2x *bp)
{
	u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];

	if (supported &
	    (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
		return 20000;

	return 10000; /* assume lowest supported speed is 10G */
}

int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
{
	struct bnx2x_link_report_data *state = &bp->last_reported_link;
	struct pf_vf_bulletin_content *bulletin;
	struct bnx2x_virtf *vf;
	bool update = true;
	int rc = 0;

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
	if (rc)
		return rc;

	mutex_lock(&bp->vfdb->bulletin_mutex);

	if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;

		bulletin->link_speed = state->line_speed;
		bulletin->link_flags = 0;
		if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
		if (test_bit(BNX2X_LINK_REPORT_FD,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
		if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
		if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
			     &state->link_report_flags))
			bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
		   !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;
		bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
	} else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
		   (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
		bulletin->valid_bitmap |= 1 << LINK_VALID;
		bulletin->link_speed = bnx2x_max_speed_cap(bp);
		bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
	} else {
		update = false;
	}

	if (update) {
		DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
		   "vf %d mode %u speed %d flags %x\n", idx,
		   vf->link_cfg, bulletin->link_speed, bulletin->link_flags);

		/* Post update on VF's bulletin board */
		rc = bnx2x_post_vf_bulletin(bp, idx);
		if (rc) {
			BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
			goto out;
		}
	}

out:
	mutex_unlock(&bp->vfdb->bulletin_mutex);
	return rc;
}

int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = BP_VF(bp, idx);

	if (!vf)
		return -EINVAL;

	if (vf->link_cfg == link_state)
		return 0; /* nothing to do */

	vf->link_cfg = link_state;

	return bnx2x_iov_link_update_vf(bp, idx);
}

void bnx2x_iov_link_update(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vfid)
		bnx2x_iov_link_update_vf(bp, vfid);
}

/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
	int vfid;

	if (!IS_SRIOV(bp)) {
		DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
		return 0;
	}

	DP(BNX2X_MSG_IOV, "num of vfs: %d\n", (bp)->vfdb->sriov.nr_virtfn);

	/* let FLRs complete ... */
	msleep(100);

	/* initialize vf database */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vfid) *
			BNX2X_CIDS_PER_VF;

		union cdu_context *base_cxt = (union cdu_context *)
			BP_VF_CXT_PAGE(bp, base_vf_cid/ILT_PAGE_CIDS)->addr +
			(base_vf_cid & (ILT_PAGE_CIDS-1));

		DP(BNX2X_MSG_IOV,
		   "VF[%d] Max IGU SBs: %d, base vf cid 0x%x, base cid 0x%x, base cxt %p\n",
		   vf->abs_vfid, vf_sb_count(vf), base_vf_cid,
		   BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);

		/* init statically provisioned resources */
		bnx2x_iov_static_resc(bp, vf);

		/* queues are initialized during VF-ACQUIRE */
		vf->filter_state = 0;
		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

		bnx2x_init_credit_pool(&vf->vf_vlans_pool, 0,
				       vf_vlan_rules_cnt(vf));
		bnx2x_init_credit_pool(&vf->vf_macs_pool, 0,
				       vf_mac_rules_cnt(vf));

		/* init mcast object - This object will be re-initialized
		 * during VF-ACQUIRE with the proper cl_id and cid.
		 * It needs to be initialized here so that it can be safely
		 * handled by a subsequent FLR flow.
		 */
		bnx2x_init_mcast_obj(bp, &vf->mcast_obj, 0xFF,
				     0xFF, 0xFF, 0xFF,
				     bnx2x_vf_sp(bp, vf, mcast_rdata),
				     bnx2x_vf_sp_map(bp, vf, mcast_rdata),
				     BNX2X_FILTER_MCAST_PENDING,
				     &vf->filter_state,
				     BNX2X_OBJ_TYPE_RX_TX);

		/* set the mailbox message addresses */
		BP_VF_MBX(bp, vfid)->msg = (struct bnx2x_vf_mbx_msg *)
			(((u8 *)BP_VF_MBX_DMA(bp)->addr) + vfid *
			 MBX_MSG_ALIGNED_SIZE);

		BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping +
			vfid * MBX_MSG_ALIGNED_SIZE;

		/* Enable vf mailbox */
		bnx2x_vf_enable_mbx(bp, vf->abs_vfid);
	}

	/* Final VF init */
	for_each_vf(bp, vfid) {
		struct bnx2x_virtf *vf = BP_VF(bp, vfid);

		/* fill in the BDF and bars */
		vf->bus = bnx2x_vf_bus(bp, vfid);
		vf->devfn = bnx2x_vf_devfn(bp, vfid);
		bnx2x_vf_set_bars(bp, vf);

		DP(BNX2X_MSG_IOV,
		   "VF info[%d]: bus 0x%x, devfn 0x%x, bar0 [0x%x, %d], bar1 [0x%x, %d], bar2 [0x%x, %d]\n",
		   vf->abs_vfid, vf->bus, vf->devfn,
		   (unsigned)vf->bars[0].bar, vf->bars[0].size,
		   (unsigned)vf->bars[1].bar, vf->bars[1].size,
		   (unsigned)vf->bars[2].bar, vf->bars[2].size);
	}

	return 0;
}

/* called by bnx2x_chip_cleanup */
int bnx2x_iov_chip_cleanup(struct bnx2x *bp)
{
	int i;

	if (!IS_SRIOV(bp))
		return 0;

	/* release all the VFs */
	for_each_vf(bp, i)
		bnx2x_vf_release(bp, BP_VF(bp, i));

	return 0;
}

/* called by bnx2x_init_hw_func, returns the next ilt line */
int bnx2x_iov_init_ilt(struct bnx2x *bp, u16 line)
{
	int i;
	struct bnx2x_ilt *ilt = BP_ILT(bp);

	if (!IS_SRIOV(bp))
		return line;

	/* set vfs ilt lines */
	for (i = 0; i < BNX2X_VF_CIDS/ILT_PAGE_CIDS; i++) {
		struct hw_dma *hw_cxt = BP_VF_CXT_PAGE(bp, i);

		ilt->lines[line+i].page = hw_cxt->addr;
		ilt->lines[line+i].page_mapping = hw_cxt->mapping;
		ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */
	}
	return line + i;
}
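
/* Note: each ILT line claimed above maps one CDU page of VF contexts, so the
 * number of lines consumed is BNX2X_VF_CIDS / ILT_PAGE_CIDS - the same bound
 * used when the context pages were allocated in bnx2x_iov_alloc_mem().
 */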

static u8 bnx2x_iov_is_vf_cid(struct bnx2x *bp, u16 cid)
{
	return ((cid >= BNX2X_FIRST_VF_CID) &&
		((cid - BNX2X_FIRST_VF_CID) < BNX2X_VF_CIDS));
}

static
void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
					struct bnx2x_vf_queue *vfq,
					union event_ring_elem *elem)
{
	unsigned long ramrod_flags = 0;
	int rc = 0;
	u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);

	/* Always push next commands out, don't wait here */
	set_bit(RAMROD_CONT, &ramrod_flags);

	switch (echo >> BNX2X_SWCID_SHIFT) {
	case BNX2X_FILTER_MAC_PENDING:
		rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
					   &ramrod_flags);
		break;
	case BNX2X_FILTER_VLAN_PENDING:
		rc = vfq->vlan_obj.complete(bp, &vfq->vlan_obj, elem,
					    &ramrod_flags);
		break;
	default:
		BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
		return;
	}
	if (rc < 0)
		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
	else if (rc > 0)
		DP(BNX2X_MSG_IOV, "Scheduled next pending commands...\n");
}

static
void bnx2x_vf_handle_mcast_eqe(struct bnx2x *bp,
			       struct bnx2x_virtf *vf)
{
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc;

	rparam.mcast_obj = &vf->mcast_obj;
	vf->mcast_obj.raw.clear_pending(&vf->mcast_obj.raw);

	/* If there are pending mcast commands - send them */
	if (vf->mcast_obj.check_pending(&vf->mcast_obj)) {
		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
		if (rc < 0)
			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
				  rc);
	}
}

static
void bnx2x_vf_handle_filters_eqe(struct bnx2x *bp,
				 struct bnx2x_virtf *vf)
{
	smp_mb__before_atomic();
	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
	smp_mb__after_atomic();
}

static void bnx2x_vf_handle_rss_update_eqe(struct bnx2x *bp,
					   struct bnx2x_virtf *vf)
{
	vf->rss_conf_obj.raw.clear_pending(&vf->rss_conf_obj.raw);
}

int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
{
	struct bnx2x_virtf *vf;
	int qidx = 0, abs_vfid;
	u8 opcode;
	u16 cid = 0xffff;

	if (!IS_SRIOV(bp))
		return 1;

	/* first get the cid - the only events we handle here are cfc-delete
	 * and set-mac completion
	 */
	opcode = elem->message.opcode;

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
	case EVENT_RING_OPCODE_MULTICAST_RULES:
	case EVENT_RING_OPCODE_FILTERS_RULES:
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		cid = SW_CID(elem->message.data.eth_event.echo);
		DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
		break;
	case EVENT_RING_OPCODE_VF_FLR:
		abs_vfid = elem->message.data.vf_flr_event.vf_id;
		DP(BNX2X_MSG_IOV, "Got VF FLR notification abs_vfid=%d\n",
		   abs_vfid);
		goto get_vf;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		abs_vfid = elem->message.data.malicious_vf_event.vf_id;
		BNX2X_ERR("Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
			  abs_vfid,
			  elem->message.data.malicious_vf_event.err_id);
		goto get_vf;
	default:
		return 1;
	}

	/* check if the cid is in the VF range */
	if (!bnx2x_iov_is_vf_cid(bp, cid)) {
		DP(BNX2X_MSG_IOV, "cid is outside vf range: %d\n", cid);
		return 1;
	}

	/* extract vf and rxq index from vf_cid - relies on the following:
	 * 1. vfid on cid reflects the true abs_vfid
	 * 2. The max number of VFs (per path) is 64
	 */
	qidx = cid & ((1 << BNX2X_VF_CID_WND)-1);
	abs_vfid = (cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
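
	/* Illustrative decode, assuming BNX2X_VF_CID_WND == 4 and
	 * BNX2X_MAX_NUM_OF_VFS == 64: a cid of 0x125 yields
	 * qidx = 0x125 & 0xf = 5 and abs_vfid = (0x125 >> 4) & 0x3f = 0x12.
	 */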
get_vf:
	vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);

	if (!vf) {
		BNX2X_ERR("EQ completion for unknown VF, cid %d, abs_vfid %d\n",
			  cid, abs_vfid);
		return 0;
	}

	switch (opcode) {
	case EVENT_RING_OPCODE_CFC_DEL:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] cfc delete ramrod\n",
		   vf->abs_vfid, qidx);
		vfq_get(vf, qidx)->sp_obj.complete_cmd(bp,
						       &vfq_get(vf,
								qidx)->sp_obj,
						       BNX2X_Q_CMD_CFC_DEL);
		break;
	case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mac/vlan ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_classification_eqe(bp, vfq_get(vf, qidx), elem);
		break;
	case EVENT_RING_OPCODE_MULTICAST_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set mcast ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_mcast_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_FILTERS_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] set rx-mode ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_filters_eqe(bp, vf);
		break;
	case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
		DP(BNX2X_MSG_IOV, "got VF [%d:%d] RSS update ramrod\n",
		   vf->abs_vfid, qidx);
		bnx2x_vf_handle_rss_update_eqe(bp, vf);
		/* fall through */
	case EVENT_RING_OPCODE_VF_FLR:
		/* Do nothing for now */
		return 0;
	case EVENT_RING_OPCODE_MALICIOUS_VF:
		vf->malicious = true;
		return 0;
	}

	return 0;
}
1837 static struct bnx2x_virtf *bnx2x_vf_by_cid(struct bnx2x *bp, int vf_cid)
1839 /* extract the vf from vf_cid - relies on the following:
1840 * 1. vfid on cid reflects the true abs_vfid
1841 * 2. The max number of VFs (per path) is 64
1843 int abs_vfid = (vf_cid >> BNX2X_VF_CID_WND) & (BNX2X_MAX_NUM_OF_VFS-1);
1844 return bnx2x_vf_by_abs_fid(bp, abs_vfid);
1847 void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
1848 struct bnx2x_queue_sp_obj **q_obj)
1850 struct bnx2x_virtf *vf;
1852 if (!IS_SRIOV(bp))
1853 return;
1855 vf = bnx2x_vf_by_cid(bp, vf_cid);
1857 if (vf) {
1858 /* extract queue index from vf_cid - relies on the following:
1859 * 1. vfid on cid reflects the true abs_vfid
1860 * 2. The max number of VFs (per path) is 64
1862 int q_index = vf_cid & ((1 << BNX2X_VF_CID_WND)-1);
1863 *q_obj = &bnx2x_vfq(vf, q_index, sp_obj);
1864 } else {
1865 BNX2X_ERR("No vf matching cid %d\n", vf_cid);

void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
{
	int i;
	int first_queue_query_index, num_queues_req;
	dma_addr_t cur_data_offset;
	struct stats_query_entry *cur_query_entry;
	u8 stats_count = 0;
	bool is_fcoe = false;

	if (!IS_SRIOV(bp))
		return;

	if (!NO_FCOE(bp))
		is_fcoe = true;

	/* fcoe adds one global request and one queue request */
	num_queues_req = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe;
	first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
		(is_fcoe ? 0 : 1);

	DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
	       "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
	       BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
	       first_queue_query_index + num_queues_req);

	cur_data_offset = bp->fw_stats_data_mapping +
		offsetof(struct bnx2x_fw_stats_data, queue_stats) +
		num_queues_req * sizeof(struct per_queue_stats);

	cur_query_entry = &bp->fw_stats_req->
		query[first_queue_query_index + num_queues_req];

	for_each_vf(bp, i) {
		int j;
		struct bnx2x_virtf *vf = BP_VF(bp, i);

		if (vf->state != VF_ENABLED) {
			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
			       "vf %d not enabled so no stats for it\n",
			       vf->abs_vfid);
			continue;
		}

		if (vf->malicious) {
			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
			       "vf %d malicious so no stats for it\n",
			       vf->abs_vfid);
			continue;
		}

		DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
		       "add addresses for vf %d\n", vf->abs_vfid);
		for_each_vfq(vf, j) {
			struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

			dma_addr_t q_stats_addr =
				vf->fw_stat_map + j * vf->stats_stride;

			/* collect stats from active queues only */
			if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
			    BNX2X_Q_LOGICAL_STATE_STOPPED)
				continue;

			/* create stats query entry for this queue */
			cur_query_entry->kind = STATS_TYPE_QUEUE;
			cur_query_entry->index = vfq_stat_id(vf, rxq);
			cur_query_entry->funcID =
				cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
			cur_query_entry->address.hi =
				cpu_to_le32(U64_HI(q_stats_addr));
			cur_query_entry->address.lo =
				cpu_to_le32(U64_LO(q_stats_addr));
			DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
			       "added address %x %x for vf %d queue %d client %d\n",
			       cur_query_entry->address.hi,
			       cur_query_entry->address.lo,
			       cur_query_entry->funcID,
			       j, cur_query_entry->index);
			cur_query_entry++;
			cur_data_offset += sizeof(struct per_queue_stats);
			stats_count++;

			/* all stats are coalesced to the leading queue */
			if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
				break;
		}
	}
	bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
}
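
/* Layout sketch (illustrative, counts assumed): with 4 PF eth queues and FCoE
 * enabled, num_queues_req == 5, so the VF entries are appended starting at
 * fw_stats_req->query[first_queue_query_index + 5] and their DMA buffers
 * follow the five PF per_queue_stats blocks inside bnx2x_fw_stats_data.
 */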

/* VF API helpers */
static void bnx2x_vf_qtbl_set_q(struct bnx2x *bp, u8 abs_vfid, u8 qid,
				u8 enable)
{
	u32 reg = PXP_REG_HST_ZONE_PERMISSION_TABLE + qid * 4;
	u32 val = enable ? (abs_vfid | (1 << 6)) : 0;

	REG_WR(bp, reg, val);
}
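
/* Example (illustrative): enabling queue-zone 3 for abs_vfid 5 writes 0x45
 * (vfid 5 with the valid bit, 1 << 6) to the permission table entry at byte
 * offset 3 * 4; disabling writes 0, revoking the VF's access to that zone.
 */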

static void bnx2x_vf_clr_qtbl(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int i;

	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), false);
}

static void bnx2x_vf_igu_disable(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	u32 val;

	/* clear the VF configuration - pretend */
	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf->abs_vfid));
	val = REG_RD(bp, IGU_REG_VF_CONFIGURATION);
	val &= ~(IGU_VF_CONF_MSI_MSIX_EN | IGU_VF_CONF_SINGLE_ISR_EN |
		 IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_PARENT_MASK);
	REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
}

u8 bnx2x_vf_max_queue_cnt(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	return min_t(u8, min_t(u8, vf_sb_count(vf), BNX2X_CIDS_PER_VF),
		     BNX2X_VF_MAX_QUEUES);
}

static
int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
			    struct vf_pf_resc_request *req_resc)
{
	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

	return ((req_resc->num_rxqs <= rxq_cnt) &&
		(req_resc->num_txqs <= txq_cnt) &&
		(req_resc->num_sbs <= vf_sb_count(vf)) &&
		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
}

/* CORE VF API */
int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
		     struct vf_pf_resc_request *resc)
{
	int base_vf_cid = (BP_VFDB(bp)->sriov.first_vf_in_pf + vf->index) *
		BNX2X_CIDS_PER_VF;

	union cdu_context *base_cxt = (union cdu_context *)
		BP_VF_CXT_PAGE(bp, base_vf_cid / ILT_PAGE_CIDS)->addr +
		(base_vf_cid & (ILT_PAGE_CIDS - 1));
	int i;

	/* if state is 'acquired' the VF was not released or FLR'd; in
	 * this case the returned resources match the already acquired
	 * resources. Verify that the requested numbers do not exceed
	 * the already acquired numbers.
	 */
	if (vf->state == VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] Trying to re-acquire resources (VF was not released or FLR'd)\n",
		   vf->abs_vfid);

		if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
			BNX2X_ERR("VF[%d] When re-acquiring resources, requested numbers must be <= the previously acquired numbers\n",
				  vf->abs_vfid);
			return -EINVAL;
		}
		return 0;
	}

	/* Otherwise vf state must be 'free' or 'reset' */
	if (vf->state != VF_FREE && vf->state != VF_RESET) {
		BNX2X_ERR("VF[%d] Can not acquire a VF with state %d\n",
			  vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* static allocation:
	 * the global maximum numbers are fixed per VF. Fail the request if
	 * the requested numbers exceed these globals.
	 */
	if (!bnx2x_vf_chk_avail_resc(bp, vf, resc)) {
		DP(BNX2X_MSG_IOV,
		   "cannot fulfill vf resource request. Placing maximal available values in response\n");
		/* set the max resource in the vf */
		return -ENOMEM;
	}

	/* Set resources counters - 0 request means max available */
	vf_sb_count(vf) = resc->num_sbs;
	vf_rxq_count(vf) = resc->num_rxqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);

	DP(BNX2X_MSG_IOV,
	   "Fulfilling vf request: sb count %d, rx_count %d, tx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
	   vf_sb_count(vf), vf_rxq_count(vf),
	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
	   vf_vlan_rules_cnt(vf));

	/* Initialize the queues */
	if (!vf->vfqs) {
		DP(BNX2X_MSG_IOV, "vf->vfqs was not allocated\n");
		return -EINVAL;
	}

	for_each_vfq(vf, i) {
		struct bnx2x_vf_queue *q = vfq_get(vf, i);

		if (!q) {
			BNX2X_ERR("q number %d was not allocated\n", i);
			return -EINVAL;
		}

		q->index = i;
		q->cxt = &((base_cxt + i)->eth);
		q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;

		DP(BNX2X_MSG_IOV, "VFQ[%d:%d]: index %d, cid 0x%x, cxt %p\n",
		   vf->abs_vfid, i, q->index, q->cid, q->cxt);

		/* init SP objects */
		bnx2x_vfq_init(bp, vf, q);
	}
	vf->state = VF_ACQUIRED;
	return 0;
}
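
/* Flow sketch (illustrative): a VF's "acquire" TLV typically requests 0
 * rxqs/txqs, meaning "give me the maximum"; the counters above then fall
 * back to bnx2x_vf_max_queue_cnt(), and each of the VF's CIDs is bound to
 * its CDU context and slow-path queue object before VF_ACQUIRED is set.
 */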

int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
{
	struct bnx2x_func_init_params func_init = {0};
	int i;

	/* the sb resources are initialized at this point, do the
	 * FW/HW initializations
	 */
	for_each_vf_sb(vf, i)
		bnx2x_init_sb(bp, (dma_addr_t)sb_map[i], vf->abs_vfid, true,
			      vf_igu_sb(vf, i), vf_igu_sb(vf, i));

	/* Sanity checks */
	if (vf->state != VF_ACQUIRED) {
		DP(BNX2X_MSG_IOV, "VF[%d] is not in VF_ACQUIRED, but %d\n",
		   vf->abs_vfid, vf->state);
		return -EINVAL;
	}

	/* let FLR complete ... */
	msleep(100);

	/* FLR cleanup epilogue */
	if (bnx2x_vf_flr_clnup_epilog(bp, vf->abs_vfid))
		return -EBUSY;

	/* reset IGU VF statistics: MSIX */
	REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT + vf->abs_vfid * 4, 0);

	/* function setup */
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = FW_VF_HANDLE(vf->abs_vfid);
	bnx2x_func_init(bp, &func_init);

	/* Enable the vf */
	bnx2x_vf_enable_access(bp, vf->abs_vfid);
	bnx2x_vf_enable_traffic(bp, vf);

	/* queue protection table */
	for_each_vfq(vf, i)
		bnx2x_vf_qtbl_set_q(bp, vf->abs_vfid,
				    vfq_qzone_id(vf, vfq_get(vf, i)), true);

	vf->state = VF_ENABLED;

	/* update vf bulletin board */
	bnx2x_post_vf_bulletin(bp, vf->index);

	return 0;
}

struct set_vf_state_cookie {
	struct bnx2x_virtf *vf;
	u8 state;
};

static void bnx2x_set_vf_state(void *cookie)
{
	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;

	p->vf->state = p->state;
}
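
/* Usage sketch (mirrors bnx2x_vf_close() below): the cookie carries the
 * target state into the stats machinery, e.g.
 *
 *	struct set_vf_state_cookie cookie = { .vf = vf, .state = VF_ACQUIRED };
 *	rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
 *
 * so the state flips only while no stats ramrod is in flight.
 */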

int bnx2x_vf_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc = 0, i;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Close all queues */
	for (i = 0; i < vf_rxq_count(vf); i++) {
		rc = bnx2x_vf_queue_teardown(bp, vf, i);
		if (rc)
			goto op_err;
	}

	/* disable the interrupts */
	DP(BNX2X_MSG_IOV, "disabling igu\n");
	bnx2x_vf_igu_disable(bp, vf);

	/* disable the VF */
	DP(BNX2X_MSG_IOV, "clearing qtbl\n");
	bnx2x_vf_clr_qtbl(bp, vf);

	/* need to make sure there are no outstanding stats ramrods which may
	 * cause the device to access the VF's stats buffer which it will free
	 * as soon as we return from the close flow.
	 */
	{
		struct set_vf_state_cookie cookie;

		cookie.vf = vf;
		cookie.state = VF_ACQUIRED;
		rc = bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
		if (rc)
			goto op_err;
	}

	DP(BNX2X_MSG_IOV, "set state to acquired\n");

	return 0;
op_err:
	BNX2X_ERR("vf[%d] CLOSE error: rc %d\n", vf->abs_vfid, rc);
	return rc;
}

/* VF release can be called in two cases: 1. the VF was acquired but
 * not enabled; 2. the VF was enabled or is in the process of being
 * enabled.
 */
int bnx2x_vf_free(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	DP(BNX2X_MSG_IOV, "VF[%d] STATE: %s\n", vf->abs_vfid,
	   vf->state == VF_FREE ? "Free" :
	   vf->state == VF_ACQUIRED ? "Acquired" :
	   vf->state == VF_ENABLED ? "Enabled" :
	   vf->state == VF_RESET ? "Reset" :
	   "Unknown");

	switch (vf->state) {
	case VF_ENABLED:
		rc = bnx2x_vf_close(bp, vf);
		if (rc)
			goto op_err;
		/* Fallthrough to release resources */
	case VF_ACQUIRED:
		DP(BNX2X_MSG_IOV, "about to free resources\n");
		bnx2x_vf_free_resc(bp, vf);
		break;

	case VF_FREE:
	case VF_RESET:
	default:
		break;
	}
	return 0;
op_err:
	BNX2X_ERR("VF[%d] RELEASE error: rc %d\n", vf->abs_vfid, rc);
	return rc;
}

int bnx2x_vf_rss_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
			struct bnx2x_config_rss_params *rss)
{
	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);
	set_bit(RAMROD_COMP_WAIT, &rss->ramrod_flags);
	return bnx2x_config_rss(bp, rss);
}

int bnx2x_vf_tpa_update(struct bnx2x *bp, struct bnx2x_virtf *vf,
			struct vfpf_tpa_tlv *tlv,
			struct bnx2x_queue_update_tpa_params *params)
{
	aligned_u64 *sge_addr = tlv->tpa_client_info.sge_addr;
	struct bnx2x_queue_state_params qstate;
	int qid, rc = 0;

	DP(BNX2X_MSG_IOV, "vf[%d]\n", vf->abs_vfid);

	/* Set ramrod params */
	memset(&qstate, 0, sizeof(struct bnx2x_queue_state_params));
	memcpy(&qstate.params.update_tpa, params,
	       sizeof(struct bnx2x_queue_update_tpa_params));
	qstate.cmd = BNX2X_Q_CMD_UPDATE_TPA;
	set_bit(RAMROD_COMP_WAIT, &qstate.ramrod_flags);

	for (qid = 0; qid < vf_rxq_count(vf); qid++) {
		qstate.q_obj = &bnx2x_vfq(vf, qid, sp_obj);
		qstate.params.update_tpa.sge_map = sge_addr[qid];
		DP(BNX2X_MSG_IOV, "sge_addr[%d:%d] %08x:%08x\n",
		   vf->abs_vfid, qid, U64_HI(sge_addr[qid]),
		   U64_LO(sge_addr[qid]));
		rc = bnx2x_queue_state_change(bp, &qstate);
		if (rc) {
			BNX2X_ERR("Failed to configure sge_addr %08x:%08x for [%d:%d]\n",
				  U64_HI(sge_addr[qid]), U64_LO(sge_addr[qid]),
				  vf->abs_vfid, qid);
			return rc;
		}
	}

	return rc;
}

/* VF release ~ VF close + VF release-resources
 * Release is the ultimate SW shutdown and is called whenever an
 * irrecoverable error is encountered.
 */
int bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
	int rc;

	DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);

	rc = bnx2x_vf_free(bp, vf);
	if (rc)
		WARN(rc,
		     "VF[%d] Failed to free resources in release op - rc=%d\n",
		     vf->abs_vfid, rc);
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
	return rc;
}

void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
			      enum channel_tlvs tlv)
{
	/* we don't lock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(tlv)) {
		BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
		return;
	}

	/* lock the channel */
	mutex_lock(&vf->op_mutex);

	/* record the locking op */
	vf->op_current = tlv;

	/* log the lock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel locked by %d\n",
	   vf->abs_vfid, tlv);
}

void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
				enum channel_tlvs expected_tlv)
{
	enum channel_tlvs current_tlv;

	if (!vf) {
		BNX2X_ERR("VF was %p\n", vf);
		return;
	}

	current_tlv = vf->op_current;

	/* we don't unlock the channel for unsupported tlvs */
	if (!bnx2x_tlv_supported(expected_tlv))
		return;

	WARN(expected_tlv != vf->op_current,
	     "lock mismatch: expected %d found %d\n", expected_tlv,
	     vf->op_current);

	/* clear the locking op */
	vf->op_current = CHANNEL_TLV_NONE;

	/* unlock the channel */
	mutex_unlock(&vf->op_mutex);

	/* log the unlock */
	DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
	   vf->abs_vfid, current_tlv);
}
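
/* Usage sketch (as in bnx2x_set_vf_mac()/bnx2x_set_vf_vlan() below): PF-side
 * flows bracket their VF reconfiguration with the channel lock, e.g.
 *
 *	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 *	... reconfigure the VF's classification objects ...
 *	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
 *
 * The tlv recorded in vf->op_current lets the unlock path WARN on
 * mismatched lock/unlock pairs.
 */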

static int bnx2x_set_pf_tx_switching(struct bnx2x *bp, bool enable)
{
	struct bnx2x_queue_state_params q_params;
	u32 prev_flags;
	int i, rc;

	/* Verify changes are needed and record current Tx switching state */
	prev_flags = bp->flags;
	if (enable)
		bp->flags |= TX_SWITCHING;
	else
		bp->flags &= ~TX_SWITCHING;
	if (prev_flags == bp->flags)
		return 0;

	/* Verify state enables the sending of queue ramrods */
	if ((bp->state != BNX2X_STATE_OPEN) ||
	    (bnx2x_get_q_logical_state(bp,
				       &bnx2x_sp_obj(bp, &bp->fp[0]).q_obj) !=
	     BNX2X_Q_LOGICAL_STATE_ACTIVE))
		return 0;

	/* send queue update ramrod to configure Tx switching */
	memset(&q_params, 0, sizeof(q_params));
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
	q_params.cmd = BNX2X_Q_CMD_UPDATE;
	__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING_CHNG,
		  &q_params.params.update.update_flags);
	if (enable)
		__set_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			  &q_params.params.update.update_flags);
	else
		__clear_bit(BNX2X_Q_UPDATE_TX_SWITCHING,
			    &q_params.params.update.update_flags);

	/* send the ramrod on all the queues of the PF */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* Set the appropriate Queue object */
		q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure Tx switching\n");
			return rc;
		}
	}

	DP(BNX2X_MSG_IOV, "%s Tx Switching\n", enable ? "Enabled" : "Disabled");
	return 0;
}

int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
	struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
	   num_vfs_param, BNX2X_NR_VIRTFN(bp));

	/* HW channel is only operational when PF is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
		return -EINVAL;
	}

	/* we are always bound by the total_vfs in the configuration space */
	if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
			  num_vfs_param, BNX2X_NR_VIRTFN(bp));
		num_vfs_param = BNX2X_NR_VIRTFN(bp);
	}

	bp->requested_nr_virtfn = num_vfs_param;
	if (num_vfs_param == 0) {
		bnx2x_set_pf_tx_switching(bp, false);
		bnx2x_disable_sriov(bp);
		return 0;
	} else {
		return bnx2x_enable_sriov(bp);
	}
}
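
/* This is the PCI .sriov_configure callback; it is normally reached through
 * the standard sysfs knob, e.g. (device address illustrative):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
 *
 * A write of 0 tears SR-IOV down; non-zero values are clamped to the
 * total_vfs advertised in the device's SR-IOV capability.
 */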

#define IGU_ENTRY_SIZE 4

int bnx2x_enable_sriov(struct bnx2x *bp)
{
	int rc = 0, req_vfs = bp->requested_nr_virtfn;
	int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
	u32 igu_entry, address;
	u16 num_vf_queues;

	if (req_vfs == 0)
		return 0;

	first_vf = bp->vfdb->sriov.first_vf_in_pf;

	/* statically distribute vf sb pool between VFs */
	num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
			      BP_VFDB(bp)->vf_sbs_pool / req_vfs);

	/* zero previous values learned from igu cam */
	for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		vf->sb_count = 0;
		vf_sb_count(BP_VF(bp, vf_idx)) = 0;
	}
	bp->vfdb->vf_sbs_pool = 0;

	/* prepare IGU cam */
	sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
	address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
			igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
				vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
				IGU_REG_MAPPING_MEMORY_VALID;
			DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
			   sb_idx, vf_idx);
			REG_WR(bp, address, igu_entry);
			sb_idx++;
			address += IGU_ENTRY_SIZE;
		}
	}

	/* Reinitialize vf database according to igu cam */
	bnx2x_get_vf_igu_cam_info(bp);

	DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
	   BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);

	qcount = 0;
	for_each_vf(bp, vf_idx) {
		struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);

		/* set local queue arrays */
		vf->vfqs = &bp->vfdb->vfqs[qcount];
		qcount += vf_sb_count(vf);
		bnx2x_iov_static_resc(bp, vf);
	}

	/* prepare msix vectors in VF configuration space - the value in the
	 * PCI configuration space should be the index of the last entry,
	 * namely one less than the actual size of the table
	 */
	for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
		bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
		REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
		       num_vf_queues - 1);
		DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
		   vf_idx, num_vf_queues - 1);
	}
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

	/* enable sriov. This will probe all the VFs, and consequently cause
	 * the "acquire" messages to appear on the VF PF channel.
	 */
	DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
	bnx2x_disable_sriov(bp);

	rc = bnx2x_set_pf_tx_switching(bp, true);
	if (rc)
		return rc;

	rc = pci_enable_sriov(bp->pdev, req_vfs);
	if (rc) {
		BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
		return rc;
	}
	DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
	return req_vfs;
}
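
/* Sizing example (illustrative, pool size assumed): with a vf_sbs_pool of 64
 * status blocks and req_vfs == 8, each VF gets num_vf_queues == 8 IGU CAM
 * entries, and its MSI-X control register is written 7, the index of the
 * last table entry rather than the table size.
 */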

void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
{
	int vfidx;
	struct pf_vf_bulletin_content *bulletin;

	DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
	for_each_vf(bp, vfidx) {
		bulletin = BP_VF_BULLETIN(bp, vfidx);
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0,
					  htons(ETH_P_8021Q));
	}
}

void bnx2x_disable_sriov(struct bnx2x *bp)
{
	if (pci_vfs_assigned(bp->pdev)) {
		DP(BNX2X_MSG_IOV,
		   "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
		return;
	}

	pci_disable_sriov(bp->pdev);
}

static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
			    struct bnx2x_virtf **vf,
			    struct pf_vf_bulletin_content **bulletin,
			    bool test_queue)
{
	if (bp->state != BNX2X_STATE_OPEN) {
		BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
		return -EINVAL;
	}

	if (!IS_SRIOV(bp)) {
		BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
		return -EINVAL;
	}

	if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
		BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
			  vfidx, BNX2X_NR_VIRTFN(bp));
		return -EINVAL;
	}

	/* init members */
	*vf = BP_VF(bp, vfidx);
	*bulletin = BP_VF_BULLETIN(bp, vfidx);

	if (!*vf) {
		BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
		return -EINVAL;
	}

	if (test_queue && !(*vf)->vfqs) {
		BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
			  vfidx);
		return -EINVAL;
	}

	if (!*bulletin) {
		BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
			  vfidx);
		return -EINVAL;
	}

	return 0;
}

int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
			struct ifla_vf_info *ivi)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x_vlan_mac_obj *mac_obj;
	struct bnx2x_vlan_mac_obj *vlan_obj;
	int rc;

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
	if (rc)
		return rc;

	mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	if (!mac_obj || !vlan_obj) {
		BNX2X_ERR("VF partially initialized\n");
		return -EINVAL;
	}

	ivi->vf = vfidx;
	ivi->qos = 0;
	ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
	ivi->min_tx_rate = 0;
	ivi->spoofchk = 1; /* always enabled */
	if (vf->state == VF_ENABLED) {
		/* mac and vlan are in vlan_mac objects */
		if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
			mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
						0, ETH_ALEN);
			vlan_obj->get_n_elements(bp, vlan_obj, 1,
						 (u8 *)&ivi->vlan, 0,
						 VLAN_HLEN);
		}
	} else {
		mutex_lock(&bp->vfdb->bulletin_mutex);
		/* mac */
		if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
			/* mac configured by ndo so it's in the bulletin board */
			memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
		else
			/* function has not been loaded yet. Show mac as 0s */
			eth_zero_addr(ivi->mac);

		/* vlan */
		if (bulletin->valid_bitmap & (1 << VLAN_VALID))
			/* vlan configured by ndo so it's in the bulletin board */
			memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
		else
			/* function has not been loaded yet. Show vlans as 0s */
			memset(&ivi->vlan, 0, VLAN_HLEN);

		mutex_unlock(&bp->vfdb->bulletin_mutex);
	}

	return 0;
}
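
/* This backs the .ndo_get_vf_config hook; userspace reads the result via,
 * e.g. (device name illustrative), "ip link show dev eth0", which lists
 * each VF's mac, vlan, rate and spoofchk settings.
 */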

/* New mac for VF. Consider these cases:
 * 1. VF hasn't been acquired yet - save the mac in local bulletin board and
 *    supply at acquire.
 * 2. VF has already been acquired but has not yet initialized - store in local
 *    bulletin board. mac will be posted on VF bulletin board after VF init. VF
 *    will configure this mac when it is ready.
 * 3. VF has already initialized but has not yet setup a queue - post the new
 *    mac on VF's bulletin board right now. VF will configure this mac when it
 *    is ready.
 * 4. VF has already set a queue - delete any macs already configured for this
 *    queue and manually config the new mac.
 * In any event, once this function has been called, refuse any attempts by the
 * VF to configure any mac for itself except for this mac. In case of a race
 * where the VF fails to see the new post on its bulletin board before sending
 * a mac configuration request, the PF will simply fail the request and the VF
 * can try again after consulting its bulletin board.
 */
int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc, q_logical_state;
	struct bnx2x_virtf *vf = NULL;
	struct pf_vf_bulletin_content *bulletin = NULL;

	if (!is_valid_ether_addr(mac)) {
		BNX2X_ERR("mac address invalid\n");
		return -EINVAL;
	}

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
	if (rc)
		return rc;

	mutex_lock(&bp->vfdb->bulletin_mutex);

	/* update PF's copy of the VF's bulletin. Will no longer accept mac
	 * configuration requests from the vf unless they match this mac
	 */
	bulletin->valid_bitmap |= 1 << MAC_ADDR_VALID;
	memcpy(bulletin->mac, mac, ETH_ALEN);

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);

	/* release lock before checking return code */
	mutex_unlock(&bp->vfdb->bulletin_mutex);

	if (rc) {
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
		return rc;
	}

	q_logical_state =
		bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
	if (vf->state == VF_ENABLED &&
	    q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
		/* configure the mac in device on this vf's queue */
		unsigned long ramrod_flags = 0;
		struct bnx2x_vlan_mac_obj *mac_obj;

		/* User should be able to see failure reason in system logs */
		if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
			return -EINVAL;

		/* must lock vfpf channel to protect against vf flows */
		bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);

		/* remove existing eth macs */
		mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete eth macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* remove existing uc list macs */
		rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, true);
		if (rc) {
			BNX2X_ERR("failed to delete uc_list macs\n");
			rc = -EINVAL;
			goto out;
		}

		/* configure the new mac to device */
		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
		bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
				  BNX2X_ETH_MAC, &ramrod_flags);

out:
		bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
	}

	return rc;
}
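
/* Reached via the .ndo_set_vf_mac hook, e.g. (names illustrative):
 *
 *	ip link set dev eth0 vf 0 mac aa:bb:cc:dd:ee:f0
 *
 * Afterwards the PF rejects any other mac the VF tries to configure for
 * itself, per the case analysis above.
 */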

static void bnx2x_set_vf_vlan_acceptance(struct bnx2x *bp,
					 struct bnx2x_virtf *vf, bool accept)
{
	struct bnx2x_rx_mode_ramrod_params rx_ramrod;
	unsigned long accept_flags;

	/* need to remove/add the VF's accept_any_vlan bit */
	accept_flags = bnx2x_leading_vfq(vf, accept_flags);
	if (accept)
		set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
	else
		clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

	bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
			      accept_flags);
	bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
	bnx2x_config_rx_mode(bp, &rx_ramrod);
}

static int bnx2x_set_vf_vlan_filter(struct bnx2x *bp, struct bnx2x_virtf *vf,
				    u16 vlan, bool add)
{
	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
	unsigned long ramrod_flags = 0;
	int rc = 0;

	/* configure the new vlan to device */
	memset(&ramrod_param, 0, sizeof(ramrod_param));
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	ramrod_param.vlan_mac_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	ramrod_param.ramrod_flags = ramrod_flags;
	ramrod_param.user_req.u.vlan.vlan = vlan;
	ramrod_param.user_req.cmd = add ? BNX2X_VLAN_MAC_ADD
					: BNX2X_VLAN_MAC_DEL;
	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
	if (rc) {
		BNX2X_ERR("failed to configure vlan\n");
		return -EINVAL;
	}

	return 0;
}

int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos,
		      __be16 vlan_proto)
{
	struct pf_vf_bulletin_content *bulletin = NULL;
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_vlan_mac_obj *vlan_obj;
	unsigned long vlan_mac_flags = 0;
	unsigned long ramrod_flags = 0;
	struct bnx2x_virtf *vf = NULL;
	int i, rc;

	if (vlan > 4095) {
		BNX2X_ERR("illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
	   vfidx, vlan, 0);

	/* sanity and init */
	rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
	if (rc)
		return rc;

	/* update PF's copy of the VF's bulletin. No point in posting the vlan
	 * to the VF since it doesn't have anything to do with it. But it is
	 * useful to store it here in case the VF is not up yet, so we can
	 * configure the vlan later when it comes up. Treat vlan id 0 as
	 * remove the Host tag.
	 */
	mutex_lock(&bp->vfdb->bulletin_mutex);

	if (vlan > 0)
		bulletin->valid_bitmap |= 1 << VLAN_VALID;
	else
		bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
	bulletin->vlan = vlan;

	/* Post update on VF's bulletin board */
	rc = bnx2x_post_vf_bulletin(bp, vfidx);
	if (rc)
		BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
	mutex_unlock(&bp->vfdb->bulletin_mutex);

	/* is vf initialized and queue set up? */
	if (vf->state != VF_ENABLED ||
	    bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
	    BNX2X_Q_LOGICAL_STATE_ACTIVE)
		return rc;

	/* User should be able to see error in system logs */
	if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
		return -EINVAL;

	/* must lock vfpf channel to protect against vf flows */
	bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	/* remove existing vlans */
	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
	rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
				  &ramrod_flags);
	if (rc) {
		BNX2X_ERR("failed to delete vlans\n");
		rc = -EINVAL;
		goto out;
	}

	/* clear accept_any_vlan when HV forces vlan; otherwise set it
	 * according to VF capabilities
	 */
	if (vlan || !(vf->cfg_flags & VF_CFG_VLAN_FILTER))
		bnx2x_set_vf_vlan_acceptance(bp, vf, !vlan);

	rc = bnx2x_set_vf_vlan_filter(bp, vf, vlan, true);
	if (rc)
		goto out;

	/* send queue update ramrods to configure default vlan and
	 * silent vlan removal
	 */
	for_each_vfq(vf, i) {
		struct bnx2x_queue_state_params q_params = {NULL};
		struct bnx2x_queue_update_params *update_params;

		q_params.q_obj = &bnx2x_vfq(vf, i, sp_obj);

		/* validate the Q is UP */
		if (bnx2x_get_q_logical_state(bp, q_params.q_obj) !=
		    BNX2X_Q_LOGICAL_STATE_ACTIVE)
			continue;

		__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
		q_params.cmd = BNX2X_Q_CMD_UPDATE;
		update_params = &q_params.params.update;
		__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
			  &update_params->update_flags);
		__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
			  &update_params->update_flags);
		if (vlan == 0) {
			/* if vlan is 0 then we want to leave the VF traffic
			 * untagged, and leave the incoming traffic untouched
			 * (i.e. do not remove any vlan tags).
			 */
			__clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				    &update_params->update_flags);
			__clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				    &update_params->update_flags);
		} else {
			/* configure default vlan to vf queue and set silent
			 * vlan removal (the vf remains unaware of this vlan).
			 */
			__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
				  &update_params->update_flags);
			__set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
				  &update_params->update_flags);
			update_params->def_vlan = vlan;
			update_params->silent_removal_value =
				vlan & VLAN_VID_MASK;
			update_params->silent_removal_mask = VLAN_VID_MASK;
		}

		/* Update the Queue state */
		rc = bnx2x_queue_state_change(bp, &q_params);
		if (rc) {
			BNX2X_ERR("Failed to configure default VLAN queue %d\n",
				  i);
			goto out;
		}
	}
out:
	bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);

	if (!rc)
		DP(BNX2X_MSG_IOV,
		   "updated VF[%d] vlan configuration (vlan = %d)\n",
		   vfidx, vlan);

	return rc;
}
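
/* Reached via the .ndo_set_vf_vlan hook, e.g. (names illustrative):
 *
 *	ip link set dev eth0 vf 0 vlan 100
 *	ip link set dev eth0 vf 0 vlan 0
 *
 * The first pins the VF to VLAN 100 with silent tag removal; the second
 * (vlan 0) drops the forced tag and leaves VF traffic untouched.
 */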

/* crc is the first field in the bulletin board. Compute the crc over the
 * entire bulletin board excluding the crc field itself. Use the length field
 * as the Bulletin Board was posted by a PF with possibly a different version
 * from the vf which will sample it. Therefore, the length is computed by the
 * PF and then used blindly by the VF.
 */
u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
{
	return crc32(BULLETIN_CRC_SEED,
		     ((u8 *)bulletin) + sizeof(bulletin->crc),
		     bulletin->length - sizeof(bulletin->crc));
}
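
/* Example (illustrative, length assumed): for bulletin->length == 0x40 the
 * crc32 covers bytes [4, 0x40), i.e. everything after the leading 4-byte
 * crc field, so PF and VF agree on the span even across versions.
 */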

/* Check for new posts on the bulletin board */
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
{
	struct pf_vf_bulletin_content *bulletin;
	int attempts;

	/* sampling the structure mid-post may result in corrupted data;
	 * validate the crc to ensure coherency.
	 */
	for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
		u32 crc;

		/* sample the bulletin board */
		memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
		       sizeof(union pf_vf_bulletin));

		crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);

		if (bp->shadow_bulletin.content.crc == crc)
			break;

		BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
			  bp->shadow_bulletin.content.crc, crc);
	}

	if (attempts >= BULLETIN_ATTEMPTS) {
		BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
			  attempts);
		return PFVF_BULLETIN_CRC_ERR;
	}
	bulletin = &bp->shadow_bulletin.content;

	/* bulletin board hasn't changed since last sample */
	if (bp->old_bulletin.version == bulletin->version)
		return PFVF_BULLETIN_UNCHANGED;

	/* the mac address in bulletin board is valid and is new */
	if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
	    !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
		/* update new mac to net device */
		memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
	}

	if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
		DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
		   bulletin->link_speed, bulletin->link_flags);

		bp->vf_link_vars.line_speed = bulletin->link_speed;
		bp->vf_link_vars.link_report_flags = 0;
		/* Link is down */
		if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
				  &bp->vf_link_vars.link_report_flags);
		/* Full DUPLEX */
		if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
			__set_bit(BNX2X_LINK_REPORT_FD,
				  &bp->vf_link_vars.link_report_flags);
		/* Rx Flow Control is ON */
		if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				  &bp->vf_link_vars.link_report_flags);
		/* Tx Flow Control is ON */
		if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
				  &bp->vf_link_vars.link_report_flags);
		__bnx2x_link_report(bp);
	}

	/* copy new bulletin board to bp */
	memcpy(&bp->old_bulletin, bulletin,
	       sizeof(struct pf_vf_bulletin_content));

	return PFVF_BULLETIN_UPDATED;
}

void bnx2x_timer_sriov(struct bnx2x *bp)
{
	bnx2x_sample_bulletin(bp);

	/* if channel is down we need to self destruct */
	if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
				       BNX2X_MSG_IOV);
}

void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
{
	/* vf doorbells are embedded within the regview */
	return bp->regview + PXP_VF_ADDR_DB_START;
}

void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
{
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	BNX2X_PCI_FREE(bp->pf2vf_bulletin, bp->pf2vf_bulletin_mapping,
		       sizeof(union pf_vf_bulletin));
}

int bnx2x_vf_pci_alloc(struct bnx2x *bp)
{
	mutex_init(&bp->vf2pf_mutex);

	/* allocate vf2pf mailbox for vf to pf channel */
	bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
					 sizeof(struct bnx2x_vf_mbx_msg));
	if (!bp->vf2pf_mbox)
		goto alloc_mem_err;

	/* allocate pf 2 vf bulletin board */
	bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
					     sizeof(union pf_vf_bulletin));
	if (!bp->pf2vf_bulletin)
		goto alloc_mem_err;

	bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);

	return 0;

alloc_mem_err:
	bnx2x_vf_pci_dealloc(bp);
	return -ENOMEM;
}

void bnx2x_iov_channel_down(struct bnx2x *bp)
{
	int vf_idx;
	struct pf_vf_bulletin_content *bulletin;

	if (!IS_SRIOV(bp))
		return;

	for_each_vf(bp, vf_idx) {
		/* locate this VF's bulletin board and update the channel down
		 * bit
		 */
		bulletin = BP_VF_BULLETIN(bp, vf_idx);
		bulletin->valid_bitmap |= 1 << CHANNEL_DOWN;

		/* update vf bulletin board */
		bnx2x_post_vf_bulletin(bp, vf_idx);
	}
}

void bnx2x_iov_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, iov_task.work);

	if (!netif_running(bp->dev))
		return;

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_FLR,
			       &bp->iov_task_state))
		bnx2x_vf_handle_flr_event(bp);

	if (test_and_clear_bit(BNX2X_IOV_HANDLE_VF_MSG,
			       &bp->iov_task_state))
		bnx2x_vf_mbx(bp);
}

void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->iov_task_state);
	smp_mb__after_atomic();
	DP(BNX2X_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
	queue_delayed_work(bnx2x_iov_wq, &bp->iov_task, 0);
}
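
/* The barrier pair above orders the flag write against the subsequent
 * queue_delayed_work(); the reader side relies on the full ordering
 * implied by test_and_clear_bit() in bnx2x_iov_task().
 */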