/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */
8 #include "qlcnic_sriov.h"
10 #include "qlcnic_83xx_hw.h"
11 #include <linux/types.h>
13 #define QLC_BC_COMMAND 0
14 #define QLC_BC_RESPONSE 1
16 #define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
17 #define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
20 #define QLC_BC_CFREE 1
22 #define QLC_BC_HDR_SZ 16
23 #define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
25 #define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
26 #define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
28 #define QLC_83XX_VF_RESET_FAIL_THRESH 8
29 #define QLC_BC_CMD_MAX_RETRY_CNT 5
31 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter
*);
32 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args
*, u32
);
33 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct
*);
34 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter
*);
35 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans
*);
36 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter
*,
37 struct qlcnic_cmd_args
*);
38 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter
*, u8
);
39 static void qlcnic_sriov_process_bc_cmd(struct work_struct
*);
40 static int qlcnic_sriov_vf_shutdown(struct pci_dev
*);
41 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter
*);
43 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops
= {
44 .read_crb
= qlcnic_83xx_read_crb
,
45 .write_crb
= qlcnic_83xx_write_crb
,
46 .read_reg
= qlcnic_83xx_rd_reg_indirect
,
47 .write_reg
= qlcnic_83xx_wrt_reg_indirect
,
48 .get_mac_address
= qlcnic_83xx_get_mac_address
,
49 .setup_intr
= qlcnic_83xx_setup_intr
,
50 .alloc_mbx_args
= qlcnic_83xx_alloc_mbx_args
,
51 .mbx_cmd
= qlcnic_sriov_issue_cmd
,
52 .get_func_no
= qlcnic_83xx_get_func_no
,
53 .api_lock
= qlcnic_83xx_cam_lock
,
54 .api_unlock
= qlcnic_83xx_cam_unlock
,
55 .process_lb_rcv_ring_diag
= qlcnic_83xx_process_rcv_ring_diag
,
56 .create_rx_ctx
= qlcnic_83xx_create_rx_ctx
,
57 .create_tx_ctx
= qlcnic_83xx_create_tx_ctx
,
58 .del_rx_ctx
= qlcnic_83xx_del_rx_ctx
,
59 .del_tx_ctx
= qlcnic_83xx_del_tx_ctx
,
60 .setup_link_event
= qlcnic_83xx_setup_link_event
,
61 .get_nic_info
= qlcnic_83xx_get_nic_info
,
62 .get_pci_info
= qlcnic_83xx_get_pci_info
,
63 .set_nic_info
= qlcnic_83xx_set_nic_info
,
64 .change_macvlan
= qlcnic_83xx_sre_macaddr_change
,
65 .napi_enable
= qlcnic_83xx_napi_enable
,
66 .napi_disable
= qlcnic_83xx_napi_disable
,
67 .config_intr_coal
= qlcnic_83xx_config_intr_coal
,
68 .config_rss
= qlcnic_83xx_config_rss
,
69 .config_hw_lro
= qlcnic_83xx_config_hw_lro
,
70 .config_promisc_mode
= qlcnic_83xx_nic_set_promisc
,
71 .change_l2_filter
= qlcnic_83xx_change_l2_filter
,
72 .get_board_info
= qlcnic_83xx_get_port_info
,
73 .free_mac_list
= qlcnic_sriov_vf_free_mac_list
,
74 .enable_sds_intr
= qlcnic_83xx_enable_sds_intr
,
75 .disable_sds_intr
= qlcnic_83xx_disable_sds_intr
,
78 static struct qlcnic_nic_template qlcnic_sriov_vf_ops
= {
79 .config_bridged_mode
= qlcnic_config_bridged_mode
,
80 .config_led
= qlcnic_config_led
,
81 .cancel_idc_work
= qlcnic_sriov_vf_cancel_fw_work
,
82 .napi_add
= qlcnic_83xx_napi_add
,
83 .napi_del
= qlcnic_83xx_napi_del
,
84 .shutdown
= qlcnic_sriov_vf_shutdown
,
85 .resume
= qlcnic_sriov_vf_resume
,
86 .config_ipaddr
= qlcnic_83xx_config_ipaddr
,
87 .clear_legacy_intr
= qlcnic_83xx_clear_legacy_intr
,
90 static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl
[] = {
91 {QLCNIC_BC_CMD_CHANNEL_INIT
, 2, 2},
92 {QLCNIC_BC_CMD_CHANNEL_TERM
, 2, 2},
93 {QLCNIC_BC_CMD_GET_ACL
, 3, 14},
94 {QLCNIC_BC_CMD_CFG_GUEST_VLAN
, 2, 2},
97 static inline bool qlcnic_sriov_bc_msg_check(u32 val
)
99 return (val
& (1 << QLC_BC_MSG
)) ? true : false;
102 static inline bool qlcnic_sriov_channel_free_check(u32 val
)
104 return (val
& (1 << QLC_BC_CFREE
)) ? true : false;
107 static inline bool qlcnic_sriov_flr_check(u32 val
)
109 return (val
& (1 << QLC_BC_FLR
)) ? true : false;
112 static inline u8
qlcnic_sriov_target_func_id(u32 val
)
114 return (val
>> 4) & 0xff;
117 static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter
*adapter
, int vf_id
)
119 struct pci_dev
*dev
= adapter
->pdev
;
123 if (qlcnic_sriov_vf_check(adapter
))
126 pos
= pci_find_ext_capability(dev
, PCI_EXT_CAP_ID_SRIOV
);
127 pci_read_config_word(dev
, pos
+ PCI_SRIOV_VF_OFFSET
, &offset
);
128 pci_read_config_word(dev
, pos
+ PCI_SRIOV_VF_STRIDE
, &stride
);
130 return (dev
->devfn
+ offset
+ stride
* vf_id
) & 0xff;
133 int qlcnic_sriov_init(struct qlcnic_adapter
*adapter
, int num_vfs
)
135 struct qlcnic_sriov
*sriov
;
136 struct qlcnic_back_channel
*bc
;
137 struct workqueue_struct
*wq
;
138 struct qlcnic_vport
*vp
;
139 struct qlcnic_vf_info
*vf
;
142 if (!qlcnic_sriov_enable_check(adapter
))
145 sriov
= kzalloc(sizeof(struct qlcnic_sriov
), GFP_KERNEL
);
149 adapter
->ahw
->sriov
= sriov
;
150 sriov
->num_vfs
= num_vfs
;
152 sriov
->vf_info
= kzalloc(sizeof(struct qlcnic_vf_info
) *
153 num_vfs
, GFP_KERNEL
);
154 if (!sriov
->vf_info
) {
156 goto qlcnic_free_sriov
;
159 wq
= create_singlethread_workqueue("bc-trans");
162 dev_err(&adapter
->pdev
->dev
,
163 "Cannot create bc-trans workqueue\n");
164 goto qlcnic_free_vf_info
;
167 bc
->bc_trans_wq
= wq
;
169 wq
= create_singlethread_workqueue("async");
172 dev_err(&adapter
->pdev
->dev
, "Cannot create async workqueue\n");
173 goto qlcnic_destroy_trans_wq
;
176 bc
->bc_async_wq
= wq
;
177 INIT_LIST_HEAD(&bc
->async_list
);
179 for (i
= 0; i
< num_vfs
; i
++) {
180 vf
= &sriov
->vf_info
[i
];
181 vf
->adapter
= adapter
;
182 vf
->pci_func
= qlcnic_sriov_virtid_fn(adapter
, i
);
183 mutex_init(&vf
->send_cmd_lock
);
184 mutex_init(&vf
->vlan_list_lock
);
185 INIT_LIST_HEAD(&vf
->rcv_act
.wait_list
);
186 INIT_LIST_HEAD(&vf
->rcv_pend
.wait_list
);
187 spin_lock_init(&vf
->rcv_act
.lock
);
188 spin_lock_init(&vf
->rcv_pend
.lock
);
189 init_completion(&vf
->ch_free_cmpl
);
191 INIT_WORK(&vf
->trans_work
, qlcnic_sriov_process_bc_cmd
);
193 if (qlcnic_sriov_pf_check(adapter
)) {
194 vp
= kzalloc(sizeof(struct qlcnic_vport
), GFP_KERNEL
);
197 goto qlcnic_destroy_async_wq
;
199 sriov
->vf_info
[i
].vp
= vp
;
200 vp
->max_tx_bw
= MAX_BW
;
202 random_ether_addr(vp
->mac
);
203 dev_info(&adapter
->pdev
->dev
,
204 "MAC Address %pM is configured for VF %d\n",
211 qlcnic_destroy_async_wq
:
212 destroy_workqueue(bc
->bc_async_wq
);
214 qlcnic_destroy_trans_wq
:
215 destroy_workqueue(bc
->bc_trans_wq
);
218 kfree(sriov
->vf_info
);
221 kfree(adapter
->ahw
->sriov
);
225 void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list
*t_list
)
227 struct qlcnic_bc_trans
*trans
;
228 struct qlcnic_cmd_args cmd
;
231 spin_lock_irqsave(&t_list
->lock
, flags
);
233 while (!list_empty(&t_list
->wait_list
)) {
234 trans
= list_first_entry(&t_list
->wait_list
,
235 struct qlcnic_bc_trans
, list
);
236 list_del(&trans
->list
);
238 cmd
.req
.arg
= (u32
*)trans
->req_pay
;
239 cmd
.rsp
.arg
= (u32
*)trans
->rsp_pay
;
240 qlcnic_free_mbx_args(&cmd
);
241 qlcnic_sriov_cleanup_transaction(trans
);
244 spin_unlock_irqrestore(&t_list
->lock
, flags
);
247 void __qlcnic_sriov_cleanup(struct qlcnic_adapter
*adapter
)
249 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
250 struct qlcnic_back_channel
*bc
= &sriov
->bc
;
251 struct qlcnic_vf_info
*vf
;
254 if (!qlcnic_sriov_enable_check(adapter
))
257 qlcnic_sriov_cleanup_async_list(bc
);
258 destroy_workqueue(bc
->bc_async_wq
);
260 for (i
= 0; i
< sriov
->num_vfs
; i
++) {
261 vf
= &sriov
->vf_info
[i
];
262 qlcnic_sriov_cleanup_list(&vf
->rcv_pend
);
263 cancel_work_sync(&vf
->trans_work
);
264 qlcnic_sriov_cleanup_list(&vf
->rcv_act
);
267 destroy_workqueue(bc
->bc_trans_wq
);
269 for (i
= 0; i
< sriov
->num_vfs
; i
++)
270 kfree(sriov
->vf_info
[i
].vp
);
272 kfree(sriov
->vf_info
);
273 kfree(adapter
->ahw
->sriov
);
276 static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter
*adapter
)
278 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
279 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
280 __qlcnic_sriov_cleanup(adapter
);
283 void qlcnic_sriov_cleanup(struct qlcnic_adapter
*adapter
)
285 if (!test_bit(__QLCNIC_SRIOV_ENABLE
, &adapter
->state
))
288 qlcnic_sriov_free_vlans(adapter
);
290 if (qlcnic_sriov_pf_check(adapter
))
291 qlcnic_sriov_pf_cleanup(adapter
);
293 if (qlcnic_sriov_vf_check(adapter
))
294 qlcnic_sriov_vf_cleanup(adapter
);
297 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter
*adapter
, u32
*hdr
,
298 u32
*pay
, u8 pci_func
, u8 size
)
300 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
301 struct qlcnic_mailbox
*mbx
= ahw
->mailbox
;
302 struct qlcnic_cmd_args cmd
;
303 unsigned long timeout
;
306 memset(&cmd
, 0, sizeof(struct qlcnic_cmd_args
));
310 cmd
.func_num
= pci_func
;
311 cmd
.op_type
= QLC_83XX_MBX_POST_BC_OP
;
312 cmd
.cmd_op
= ((struct qlcnic_bc_hdr
*)hdr
)->cmd_op
;
314 err
= mbx
->ops
->enqueue_cmd(adapter
, &cmd
, &timeout
);
316 dev_err(&adapter
->pdev
->dev
,
317 "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
318 __func__
, cmd
.cmd_op
, cmd
.type
, ahw
->pci_func
,
323 if (!wait_for_completion_timeout(&cmd
.completion
, timeout
)) {
324 dev_err(&adapter
->pdev
->dev
,
325 "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
326 __func__
, cmd
.cmd_op
, cmd
.type
, ahw
->pci_func
,
328 flush_workqueue(mbx
->work_q
);
331 return cmd
.rsp_opcode
;
334 static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter
*adapter
)
336 adapter
->num_rxd
= QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF
;
337 adapter
->max_rxd
= MAX_RCV_DESCRIPTORS_10G
;
338 adapter
->num_jumbo_rxd
= QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF
;
339 adapter
->max_jumbo_rxd
= MAX_JUMBO_RCV_DESCRIPTORS_10G
;
340 adapter
->num_txd
= MAX_CMD_DESCRIPTORS
;
341 adapter
->max_rds_rings
= MAX_RDS_RINGS
;
344 int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter
*adapter
,
345 struct qlcnic_info
*npar_info
, u16 vport_id
)
347 struct device
*dev
= &adapter
->pdev
->dev
;
348 struct qlcnic_cmd_args cmd
;
352 err
= qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_GET_NIC_INFO
);
356 cmd
.req
.arg
[1] = vport_id
<< 16 | 0x1;
357 err
= qlcnic_issue_cmd(adapter
, &cmd
);
359 dev_err(&adapter
->pdev
->dev
,
360 "Failed to get vport info, err=%d\n", err
);
361 qlcnic_free_mbx_args(&cmd
);
365 status
= cmd
.rsp
.arg
[2] & 0xffff;
367 npar_info
->min_tx_bw
= MSW(cmd
.rsp
.arg
[2]);
369 npar_info
->max_tx_bw
= LSW(cmd
.rsp
.arg
[3]);
371 npar_info
->max_tx_ques
= MSW(cmd
.rsp
.arg
[3]);
373 npar_info
->max_tx_mac_filters
= LSW(cmd
.rsp
.arg
[4]);
375 npar_info
->max_rx_mcast_mac_filters
= MSW(cmd
.rsp
.arg
[4]);
377 npar_info
->max_rx_ucast_mac_filters
= LSW(cmd
.rsp
.arg
[5]);
379 npar_info
->max_rx_ip_addr
= MSW(cmd
.rsp
.arg
[5]);
381 npar_info
->max_rx_lro_flow
= LSW(cmd
.rsp
.arg
[6]);
383 npar_info
->max_rx_status_rings
= MSW(cmd
.rsp
.arg
[6]);
385 npar_info
->max_rx_buf_rings
= LSW(cmd
.rsp
.arg
[7]);
387 npar_info
->max_rx_ques
= MSW(cmd
.rsp
.arg
[7]);
388 npar_info
->max_tx_vlan_keys
= LSW(cmd
.rsp
.arg
[8]);
389 npar_info
->max_local_ipv6_addrs
= MSW(cmd
.rsp
.arg
[8]);
390 npar_info
->max_remote_ipv6_addrs
= LSW(cmd
.rsp
.arg
[9]);
392 dev_info(dev
, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
393 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
394 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
395 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
396 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
397 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
398 npar_info
->min_tx_bw
, npar_info
->max_tx_bw
,
399 npar_info
->max_tx_ques
, npar_info
->max_tx_mac_filters
,
400 npar_info
->max_rx_mcast_mac_filters
,
401 npar_info
->max_rx_ucast_mac_filters
, npar_info
->max_rx_ip_addr
,
402 npar_info
->max_rx_lro_flow
, npar_info
->max_rx_status_rings
,
403 npar_info
->max_rx_buf_rings
, npar_info
->max_rx_ques
,
404 npar_info
->max_tx_vlan_keys
, npar_info
->max_local_ipv6_addrs
,
405 npar_info
->max_remote_ipv6_addrs
);
407 qlcnic_free_mbx_args(&cmd
);
411 static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter
*adapter
,
412 struct qlcnic_cmd_args
*cmd
)
414 adapter
->rx_pvid
= MSW(cmd
->rsp
.arg
[1]) & 0xffff;
415 adapter
->flags
&= ~QLCNIC_TAGGING_ENABLED
;
419 static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter
*adapter
,
420 struct qlcnic_cmd_args
*cmd
)
422 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
426 if (sriov
->allowed_vlans
)
429 sriov
->any_vlan
= cmd
->rsp
.arg
[2] & 0xf;
430 sriov
->num_allowed_vlans
= cmd
->rsp
.arg
[2] >> 16;
431 dev_info(&adapter
->pdev
->dev
, "Number of allowed Guest VLANs = %d\n",
432 sriov
->num_allowed_vlans
);
434 qlcnic_sriov_alloc_vlans(adapter
);
436 if (!sriov
->any_vlan
)
439 num_vlans
= sriov
->num_allowed_vlans
;
440 sriov
->allowed_vlans
= kzalloc(sizeof(u16
) * num_vlans
, GFP_KERNEL
);
441 if (!sriov
->allowed_vlans
)
444 vlans
= (u16
*)&cmd
->rsp
.arg
[3];
445 for (i
= 0; i
< num_vlans
; i
++)
446 sriov
->allowed_vlans
[i
] = vlans
[i
];
451 static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter
*adapter
)
453 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
454 struct qlcnic_cmd_args cmd
;
457 ret
= qlcnic_sriov_alloc_bc_mbx_args(&cmd
, QLCNIC_BC_CMD_GET_ACL
);
461 ret
= qlcnic_issue_cmd(adapter
, &cmd
);
463 dev_err(&adapter
->pdev
->dev
, "Failed to get ACL, err=%d\n",
466 sriov
->vlan_mode
= cmd
.rsp
.arg
[1] & 0x3;
467 switch (sriov
->vlan_mode
) {
468 case QLC_GUEST_VLAN_MODE
:
469 ret
= qlcnic_sriov_set_guest_vlan_mode(adapter
, &cmd
);
472 ret
= qlcnic_sriov_set_pvid_mode(adapter
, &cmd
);
477 qlcnic_free_mbx_args(&cmd
);
481 static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter
*adapter
)
483 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
484 struct qlcnic_info nic_info
;
487 err
= qlcnic_sriov_get_vf_vport_info(adapter
, &nic_info
, 0);
491 ahw
->max_mc_count
= nic_info
.max_rx_mcast_mac_filters
;
493 err
= qlcnic_get_nic_info(adapter
, &nic_info
, ahw
->pci_func
);
497 if (qlcnic_83xx_get_port_info(adapter
))
500 qlcnic_sriov_vf_cfg_buff_desc(adapter
);
501 adapter
->flags
|= QLCNIC_ADAPTER_INITIALIZED
;
502 dev_info(&adapter
->pdev
->dev
, "HAL Version: %d\n",
503 adapter
->ahw
->fw_hal_version
);
505 ahw
->physical_port
= (u8
) nic_info
.phys_port
;
506 ahw
->switch_mode
= nic_info
.switch_mode
;
507 ahw
->max_mtu
= nic_info
.max_mtu
;
508 ahw
->op_mode
= nic_info
.op_mode
;
509 ahw
->capabilities
= nic_info
.capabilities
;
513 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter
*adapter
,
518 INIT_LIST_HEAD(&adapter
->vf_mc_list
);
519 if (!qlcnic_use_msi_x
&& !!qlcnic_use_msi
)
520 dev_warn(&adapter
->pdev
->dev
,
521 "Device does not support MSI interrupts\n");
523 /* compute and set default and max tx/sds rings */
524 qlcnic_set_tx_ring_count(adapter
, QLCNIC_SINGLE_RING
);
525 qlcnic_set_sds_ring_count(adapter
, QLCNIC_SINGLE_RING
);
527 err
= qlcnic_setup_intr(adapter
);
529 dev_err(&adapter
->pdev
->dev
, "Failed to setup interrupt\n");
530 goto err_out_disable_msi
;
533 err
= qlcnic_83xx_setup_mbx_intr(adapter
);
535 goto err_out_disable_msi
;
537 err
= qlcnic_sriov_init(adapter
, 1);
539 goto err_out_disable_mbx_intr
;
541 err
= qlcnic_sriov_cfg_bc_intr(adapter
, 1);
543 goto err_out_cleanup_sriov
;
545 err
= qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_INIT
);
547 goto err_out_disable_bc_intr
;
549 err
= qlcnic_sriov_vf_init_driver(adapter
);
551 goto err_out_send_channel_term
;
553 err
= qlcnic_sriov_get_vf_acl(adapter
);
555 goto err_out_send_channel_term
;
557 err
= qlcnic_setup_netdev(adapter
, adapter
->netdev
, pci_using_dac
);
559 goto err_out_send_channel_term
;
561 pci_set_drvdata(adapter
->pdev
, adapter
);
562 dev_info(&adapter
->pdev
->dev
, "%s: XGbE port initialized\n",
563 adapter
->netdev
->name
);
565 qlcnic_schedule_work(adapter
, qlcnic_sriov_vf_poll_dev_state
,
566 adapter
->ahw
->idc
.delay
);
569 err_out_send_channel_term
:
570 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
572 err_out_disable_bc_intr
:
573 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
575 err_out_cleanup_sriov
:
576 __qlcnic_sriov_cleanup(adapter
);
578 err_out_disable_mbx_intr
:
579 qlcnic_83xx_free_mbx_intr(adapter
);
582 qlcnic_teardown_intr(adapter
);
586 static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter
*adapter
)
592 if (++adapter
->fw_fail_cnt
> QLC_BC_CMD_MAX_RETRY_CNT
)
594 state
= QLCRDX(adapter
->ahw
, QLC_83XX_IDC_DEV_STATE
);
595 } while (state
!= QLC_83XX_IDC_DEV_READY
);
600 int qlcnic_sriov_vf_init(struct qlcnic_adapter
*adapter
, int pci_using_dac
)
602 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
605 set_bit(QLC_83XX_MODULE_LOADED
, &ahw
->idc
.status
);
606 ahw
->idc
.delay
= QLC_83XX_IDC_FW_POLL_DELAY
;
607 ahw
->reset_context
= 0;
608 adapter
->fw_fail_cnt
= 0;
609 ahw
->msix_supported
= 1;
610 adapter
->need_fw_reset
= 0;
611 adapter
->flags
|= QLCNIC_TX_INTR_SHARED
;
613 err
= qlcnic_sriov_check_dev_ready(adapter
);
617 err
= qlcnic_sriov_setup_vf(adapter
, pci_using_dac
);
621 if (qlcnic_read_mac_addr(adapter
))
622 dev_warn(&adapter
->pdev
->dev
, "failed to read mac addr\n");
624 INIT_DELAYED_WORK(&adapter
->idc_aen_work
, qlcnic_83xx_idc_aen_work
);
626 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
630 void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter
*adapter
)
632 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
634 ahw
->op_mode
= QLCNIC_SRIOV_VF_FUNC
;
635 dev_info(&adapter
->pdev
->dev
,
636 "HAL Version: %d Non Privileged SRIOV function\n",
637 ahw
->fw_hal_version
);
638 adapter
->nic_ops
= &qlcnic_sriov_vf_ops
;
639 set_bit(__QLCNIC_SRIOV_ENABLE
, &adapter
->state
);
643 void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context
*ahw
)
645 ahw
->hw_ops
= &qlcnic_sriov_vf_hw_ops
;
646 ahw
->reg_tbl
= (u32
*)qlcnic_83xx_reg_tbl
;
647 ahw
->ext_reg_tbl
= (u32
*)qlcnic_83xx_ext_reg_tbl
;
650 static u32
qlcnic_sriov_get_bc_paysize(u32 real_pay_size
, u8 curr_frag
)
654 pay_size
= real_pay_size
/ ((curr_frag
+ 1) * QLC_BC_PAYLOAD_SZ
);
657 pay_size
= QLC_BC_PAYLOAD_SZ
;
659 pay_size
= real_pay_size
% QLC_BC_PAYLOAD_SZ
;
664 int qlcnic_sriov_func_to_index(struct qlcnic_adapter
*adapter
, u8 pci_func
)
666 struct qlcnic_vf_info
*vf_info
= adapter
->ahw
->sriov
->vf_info
;
669 if (qlcnic_sriov_vf_check(adapter
))
672 for (i
= 0; i
< adapter
->ahw
->sriov
->num_vfs
; i
++) {
673 if (vf_info
[i
].pci_func
== pci_func
)
680 static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans
**trans
)
682 *trans
= kzalloc(sizeof(struct qlcnic_bc_trans
), GFP_ATOMIC
);
686 init_completion(&(*trans
)->resp_cmpl
);
690 static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr
**hdr
,
693 *hdr
= kzalloc(sizeof(struct qlcnic_bc_hdr
) * size
, GFP_ATOMIC
);
700 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args
*mbx
, u32 type
)
702 const struct qlcnic_mailbox_metadata
*mbx_tbl
;
705 mbx_tbl
= qlcnic_sriov_bc_mbx_tbl
;
706 size
= ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl
);
708 for (i
= 0; i
< size
; i
++) {
709 if (type
== mbx_tbl
[i
].cmd
) {
710 mbx
->op_type
= QLC_BC_CMD
;
711 mbx
->req
.num
= mbx_tbl
[i
].in_args
;
712 mbx
->rsp
.num
= mbx_tbl
[i
].out_args
;
713 mbx
->req
.arg
= kcalloc(mbx
->req
.num
, sizeof(u32
),
717 mbx
->rsp
.arg
= kcalloc(mbx
->rsp
.num
, sizeof(u32
),
724 memset(mbx
->req
.arg
, 0, sizeof(u32
) * mbx
->req
.num
);
725 memset(mbx
->rsp
.arg
, 0, sizeof(u32
) * mbx
->rsp
.num
);
726 mbx
->req
.arg
[0] = (type
| (mbx
->req
.num
<< 16) |
728 mbx
->rsp
.arg
[0] = (type
& 0xffff) | mbx
->rsp
.num
<< 16;
735 static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans
*trans
,
736 struct qlcnic_cmd_args
*cmd
,
737 u16 seq
, u8 msg_type
)
739 struct qlcnic_bc_hdr
*hdr
;
741 u32 num_regs
, bc_pay_sz
;
743 u8 cmd_op
, num_frags
, t_num_frags
;
745 bc_pay_sz
= QLC_BC_PAYLOAD_SZ
;
746 if (msg_type
== QLC_BC_COMMAND
) {
747 trans
->req_pay
= (struct qlcnic_bc_payload
*)cmd
->req
.arg
;
748 trans
->rsp_pay
= (struct qlcnic_bc_payload
*)cmd
->rsp
.arg
;
749 num_regs
= cmd
->req
.num
;
750 trans
->req_pay_size
= (num_regs
* 4);
751 num_regs
= cmd
->rsp
.num
;
752 trans
->rsp_pay_size
= (num_regs
* 4);
753 cmd_op
= cmd
->req
.arg
[0] & 0xff;
754 remainder
= (trans
->req_pay_size
) % (bc_pay_sz
);
755 num_frags
= (trans
->req_pay_size
) / (bc_pay_sz
);
758 t_num_frags
= num_frags
;
759 if (qlcnic_sriov_alloc_bc_msg(&trans
->req_hdr
, num_frags
))
761 remainder
= (trans
->rsp_pay_size
) % (bc_pay_sz
);
762 num_frags
= (trans
->rsp_pay_size
) / (bc_pay_sz
);
765 if (qlcnic_sriov_alloc_bc_msg(&trans
->rsp_hdr
, num_frags
))
767 num_frags
= t_num_frags
;
768 hdr
= trans
->req_hdr
;
770 cmd
->req
.arg
= (u32
*)trans
->req_pay
;
771 cmd
->rsp
.arg
= (u32
*)trans
->rsp_pay
;
772 cmd_op
= cmd
->req
.arg
[0] & 0xff;
773 remainder
= (trans
->rsp_pay_size
) % (bc_pay_sz
);
774 num_frags
= (trans
->rsp_pay_size
) / (bc_pay_sz
);
777 cmd
->req
.num
= trans
->req_pay_size
/ 4;
778 cmd
->rsp
.num
= trans
->rsp_pay_size
/ 4;
779 hdr
= trans
->rsp_hdr
;
780 cmd
->op_type
= trans
->req_hdr
->op_type
;
783 trans
->trans_id
= seq
;
784 trans
->cmd_id
= cmd_op
;
785 for (i
= 0; i
< num_frags
; i
++) {
787 hdr
[i
].msg_type
= msg_type
;
788 hdr
[i
].op_type
= cmd
->op_type
;
790 hdr
[i
].num_frags
= num_frags
;
791 hdr
[i
].frag_num
= i
+ 1;
792 hdr
[i
].cmd_op
= cmd_op
;
798 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans
*trans
)
802 kfree(trans
->req_hdr
);
803 kfree(trans
->rsp_hdr
);
807 static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info
*vf
,
808 struct qlcnic_bc_trans
*trans
, u8 type
)
810 struct qlcnic_trans_list
*t_list
;
814 if (type
== QLC_BC_RESPONSE
) {
815 t_list
= &vf
->rcv_act
;
816 spin_lock_irqsave(&t_list
->lock
, flags
);
818 list_del(&trans
->list
);
819 if (t_list
->count
> 0)
821 spin_unlock_irqrestore(&t_list
->lock
, flags
);
823 if (type
== QLC_BC_COMMAND
) {
824 while (test_and_set_bit(QLC_BC_VF_SEND
, &vf
->state
))
827 clear_bit(QLC_BC_VF_SEND
, &vf
->state
);
832 static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov
*sriov
,
833 struct qlcnic_vf_info
*vf
,
836 if (test_bit(QLC_BC_VF_FLR
, &vf
->state
) ||
837 vf
->adapter
->need_fw_reset
)
840 queue_work(sriov
->bc
.bc_trans_wq
, &vf
->trans_work
);
843 static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans
*trans
)
845 struct completion
*cmpl
= &trans
->resp_cmpl
;
847 if (wait_for_completion_timeout(cmpl
, QLC_MBOX_RESP_TIMEOUT
))
848 trans
->trans_state
= QLC_END
;
850 trans
->trans_state
= QLC_ABORT
;
855 static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans
*trans
,
858 if (type
== QLC_BC_RESPONSE
) {
859 trans
->curr_rsp_frag
++;
860 if (trans
->curr_rsp_frag
< trans
->rsp_hdr
->num_frags
)
861 trans
->trans_state
= QLC_INIT
;
863 trans
->trans_state
= QLC_END
;
865 trans
->curr_req_frag
++;
866 if (trans
->curr_req_frag
< trans
->req_hdr
->num_frags
)
867 trans
->trans_state
= QLC_INIT
;
869 trans
->trans_state
= QLC_WAIT_FOR_RESP
;
873 static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans
*trans
,
876 struct qlcnic_vf_info
*vf
= trans
->vf
;
877 struct completion
*cmpl
= &vf
->ch_free_cmpl
;
879 if (!wait_for_completion_timeout(cmpl
, QLC_MBOX_CH_FREE_TIMEOUT
)) {
880 trans
->trans_state
= QLC_ABORT
;
884 clear_bit(QLC_BC_VF_CHANNEL
, &vf
->state
);
885 qlcnic_sriov_handle_multi_frags(trans
, type
);
888 static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter
*adapter
,
889 u32
*hdr
, u32
*pay
, u32 size
)
891 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
893 u8 i
, max
= 2, hdr_size
, j
;
895 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
896 max
= (size
/ sizeof(u32
)) + hdr_size
;
898 fw_mbx
= readl(QLCNIC_MBX_FW(ahw
, 0));
899 for (i
= 2, j
= 0; j
< hdr_size
; i
++, j
++)
900 *(hdr
++) = readl(QLCNIC_MBX_FW(ahw
, i
));
901 for (; j
< max
; i
++, j
++)
902 *(pay
++) = readl(QLCNIC_MBX_FW(ahw
, i
));
905 static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info
*vf
)
911 if (!test_and_set_bit(QLC_BC_VF_CHANNEL
, &vf
->state
)) {
921 static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans
*trans
, u8 type
)
923 struct qlcnic_vf_info
*vf
= trans
->vf
;
924 u32 pay_size
, hdr_size
;
927 u8 pci_func
= trans
->func_id
;
929 if (__qlcnic_sriov_issue_bc_post(vf
))
932 if (type
== QLC_BC_COMMAND
) {
933 hdr
= (u32
*)(trans
->req_hdr
+ trans
->curr_req_frag
);
934 pay
= (u32
*)(trans
->req_pay
+ trans
->curr_req_frag
);
935 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
936 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
937 trans
->curr_req_frag
);
938 pay_size
= (pay_size
/ sizeof(u32
));
940 hdr
= (u32
*)(trans
->rsp_hdr
+ trans
->curr_rsp_frag
);
941 pay
= (u32
*)(trans
->rsp_pay
+ trans
->curr_rsp_frag
);
942 hdr_size
= (sizeof(struct qlcnic_bc_hdr
) / sizeof(u32
));
943 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->rsp_pay_size
,
944 trans
->curr_rsp_frag
);
945 pay_size
= (pay_size
/ sizeof(u32
));
948 ret
= qlcnic_sriov_post_bc_msg(vf
->adapter
, hdr
, pay
,
953 static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans
*trans
,
954 struct qlcnic_vf_info
*vf
, u8 type
)
960 if (test_bit(QLC_BC_VF_FLR
, &vf
->state
) ||
961 vf
->adapter
->need_fw_reset
)
962 trans
->trans_state
= QLC_ABORT
;
964 switch (trans
->trans_state
) {
966 trans
->trans_state
= QLC_WAIT_FOR_CHANNEL_FREE
;
967 if (qlcnic_sriov_issue_bc_post(trans
, type
))
968 trans
->trans_state
= QLC_ABORT
;
970 case QLC_WAIT_FOR_CHANNEL_FREE
:
971 qlcnic_sriov_wait_for_channel_free(trans
, type
);
973 case QLC_WAIT_FOR_RESP
:
974 qlcnic_sriov_wait_for_resp(trans
);
983 clear_bit(QLC_BC_VF_CHANNEL
, &vf
->state
);
993 static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter
*adapter
,
994 struct qlcnic_bc_trans
*trans
, int pci_func
)
996 struct qlcnic_vf_info
*vf
;
997 int err
, index
= qlcnic_sriov_func_to_index(adapter
, pci_func
);
1002 vf
= &adapter
->ahw
->sriov
->vf_info
[index
];
1004 trans
->func_id
= pci_func
;
1006 if (!test_bit(QLC_BC_VF_STATE
, &vf
->state
)) {
1007 if (qlcnic_sriov_pf_check(adapter
))
1009 if (qlcnic_sriov_vf_check(adapter
) &&
1010 trans
->cmd_id
!= QLCNIC_BC_CMD_CHANNEL_INIT
)
1014 mutex_lock(&vf
->send_cmd_lock
);
1015 vf
->send_cmd
= trans
;
1016 err
= __qlcnic_sriov_send_bc_msg(trans
, vf
, QLC_BC_COMMAND
);
1017 qlcnic_sriov_clear_trans(vf
, trans
, QLC_BC_COMMAND
);
1018 mutex_unlock(&vf
->send_cmd_lock
);
1022 static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter
*adapter
,
1023 struct qlcnic_bc_trans
*trans
,
1024 struct qlcnic_cmd_args
*cmd
)
1026 #ifdef CONFIG_QLCNIC_SRIOV
1027 if (qlcnic_sriov_pf_check(adapter
)) {
1028 qlcnic_sriov_pf_process_bc_cmd(adapter
, trans
, cmd
);
1032 cmd
->rsp
.arg
[0] |= (0x9 << 25);
1036 static void qlcnic_sriov_process_bc_cmd(struct work_struct
*work
)
1038 struct qlcnic_vf_info
*vf
= container_of(work
, struct qlcnic_vf_info
,
1040 struct qlcnic_bc_trans
*trans
= NULL
;
1041 struct qlcnic_adapter
*adapter
= vf
->adapter
;
1042 struct qlcnic_cmd_args cmd
;
1045 if (adapter
->need_fw_reset
)
1048 if (test_bit(QLC_BC_VF_FLR
, &vf
->state
))
1051 memset(&cmd
, 0, sizeof(struct qlcnic_cmd_args
));
1052 trans
= list_first_entry(&vf
->rcv_act
.wait_list
,
1053 struct qlcnic_bc_trans
, list
);
1054 adapter
= vf
->adapter
;
1056 if (qlcnic_sriov_prepare_bc_hdr(trans
, &cmd
, trans
->req_hdr
->seq_id
,
1060 __qlcnic_sriov_process_bc_cmd(adapter
, trans
, &cmd
);
1061 trans
->trans_state
= QLC_INIT
;
1062 __qlcnic_sriov_send_bc_msg(trans
, vf
, QLC_BC_RESPONSE
);
1065 qlcnic_free_mbx_args(&cmd
);
1066 req
= qlcnic_sriov_clear_trans(vf
, trans
, QLC_BC_RESPONSE
);
1067 qlcnic_sriov_cleanup_transaction(trans
);
1069 qlcnic_sriov_schedule_bc_cmd(adapter
->ahw
->sriov
, vf
,
1070 qlcnic_sriov_process_bc_cmd
);
1073 static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr
*hdr
,
1074 struct qlcnic_vf_info
*vf
)
1076 struct qlcnic_bc_trans
*trans
;
1079 if (test_and_set_bit(QLC_BC_VF_SEND
, &vf
->state
))
1082 trans
= vf
->send_cmd
;
1087 if (trans
->trans_id
!= hdr
->seq_id
)
1090 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->rsp_pay_size
,
1091 trans
->curr_rsp_frag
);
1092 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
1093 (u32
*)(trans
->rsp_hdr
+ trans
->curr_rsp_frag
),
1094 (u32
*)(trans
->rsp_pay
+ trans
->curr_rsp_frag
),
1096 if (++trans
->curr_rsp_frag
< trans
->rsp_hdr
->num_frags
)
1099 complete(&trans
->resp_cmpl
);
1102 clear_bit(QLC_BC_VF_SEND
, &vf
->state
);
1105 int __qlcnic_sriov_add_act_list(struct qlcnic_sriov
*sriov
,
1106 struct qlcnic_vf_info
*vf
,
1107 struct qlcnic_bc_trans
*trans
)
1109 struct qlcnic_trans_list
*t_list
= &vf
->rcv_act
;
1112 list_add_tail(&trans
->list
, &t_list
->wait_list
);
1113 if (t_list
->count
== 1)
1114 qlcnic_sriov_schedule_bc_cmd(sriov
, vf
,
1115 qlcnic_sriov_process_bc_cmd
);
1119 static int qlcnic_sriov_add_act_list(struct qlcnic_sriov
*sriov
,
1120 struct qlcnic_vf_info
*vf
,
1121 struct qlcnic_bc_trans
*trans
)
1123 struct qlcnic_trans_list
*t_list
= &vf
->rcv_act
;
1125 spin_lock(&t_list
->lock
);
1127 __qlcnic_sriov_add_act_list(sriov
, vf
, trans
);
1129 spin_unlock(&t_list
->lock
);
1133 static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov
*sriov
,
1134 struct qlcnic_vf_info
*vf
,
1135 struct qlcnic_bc_hdr
*hdr
)
1137 struct qlcnic_bc_trans
*trans
= NULL
;
1138 struct list_head
*node
;
1139 u32 pay_size
, curr_frag
;
1140 u8 found
= 0, active
= 0;
1142 spin_lock(&vf
->rcv_pend
.lock
);
1143 if (vf
->rcv_pend
.count
> 0) {
1144 list_for_each(node
, &vf
->rcv_pend
.wait_list
) {
1145 trans
= list_entry(node
, struct qlcnic_bc_trans
, list
);
1146 if (trans
->trans_id
== hdr
->seq_id
) {
1154 curr_frag
= trans
->curr_req_frag
;
1155 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
1157 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
1158 (u32
*)(trans
->req_hdr
+ curr_frag
),
1159 (u32
*)(trans
->req_pay
+ curr_frag
),
1161 trans
->curr_req_frag
++;
1162 if (trans
->curr_req_frag
>= hdr
->num_frags
) {
1163 vf
->rcv_pend
.count
--;
1164 list_del(&trans
->list
);
1168 spin_unlock(&vf
->rcv_pend
.lock
);
1171 if (qlcnic_sriov_add_act_list(sriov
, vf
, trans
))
1172 qlcnic_sriov_cleanup_transaction(trans
);
1177 static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov
*sriov
,
1178 struct qlcnic_bc_hdr
*hdr
,
1179 struct qlcnic_vf_info
*vf
)
1181 struct qlcnic_bc_trans
*trans
;
1182 struct qlcnic_adapter
*adapter
= vf
->adapter
;
1183 struct qlcnic_cmd_args cmd
;
1188 if (adapter
->need_fw_reset
)
1191 if (!test_bit(QLC_BC_VF_STATE
, &vf
->state
) &&
1192 hdr
->op_type
!= QLC_BC_CMD
&&
1193 hdr
->cmd_op
!= QLCNIC_BC_CMD_CHANNEL_INIT
)
1196 if (hdr
->frag_num
> 1) {
1197 qlcnic_sriov_handle_pending_trans(sriov
, vf
, hdr
);
1201 memset(&cmd
, 0, sizeof(struct qlcnic_cmd_args
));
1202 cmd_op
= hdr
->cmd_op
;
1203 if (qlcnic_sriov_alloc_bc_trans(&trans
))
1206 if (hdr
->op_type
== QLC_BC_CMD
)
1207 err
= qlcnic_sriov_alloc_bc_mbx_args(&cmd
, cmd_op
);
1209 err
= qlcnic_alloc_mbx_args(&cmd
, adapter
, cmd_op
);
1212 qlcnic_sriov_cleanup_transaction(trans
);
1216 cmd
.op_type
= hdr
->op_type
;
1217 if (qlcnic_sriov_prepare_bc_hdr(trans
, &cmd
, hdr
->seq_id
,
1219 qlcnic_free_mbx_args(&cmd
);
1220 qlcnic_sriov_cleanup_transaction(trans
);
1224 pay_size
= qlcnic_sriov_get_bc_paysize(trans
->req_pay_size
,
1225 trans
->curr_req_frag
);
1226 qlcnic_sriov_pull_bc_msg(vf
->adapter
,
1227 (u32
*)(trans
->req_hdr
+ trans
->curr_req_frag
),
1228 (u32
*)(trans
->req_pay
+ trans
->curr_req_frag
),
1230 trans
->func_id
= vf
->pci_func
;
1232 trans
->trans_id
= hdr
->seq_id
;
1233 trans
->curr_req_frag
++;
1235 if (qlcnic_sriov_soft_flr_check(adapter
, trans
, vf
))
1238 if (trans
->curr_req_frag
== trans
->req_hdr
->num_frags
) {
1239 if (qlcnic_sriov_add_act_list(sriov
, vf
, trans
)) {
1240 qlcnic_free_mbx_args(&cmd
);
1241 qlcnic_sriov_cleanup_transaction(trans
);
1244 spin_lock(&vf
->rcv_pend
.lock
);
1245 list_add_tail(&trans
->list
, &vf
->rcv_pend
.wait_list
);
1246 vf
->rcv_pend
.count
++;
1247 spin_unlock(&vf
->rcv_pend
.lock
);
1251 static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov
*sriov
,
1252 struct qlcnic_vf_info
*vf
)
1254 struct qlcnic_bc_hdr hdr
;
1255 u32
*ptr
= (u32
*)&hdr
;
1258 for (i
= 2; i
< 6; i
++)
1259 ptr
[i
- 2] = readl(QLCNIC_MBX_FW(vf
->adapter
->ahw
, i
));
1260 msg_type
= hdr
.msg_type
;
1263 case QLC_BC_COMMAND
:
1264 qlcnic_sriov_handle_bc_cmd(sriov
, &hdr
, vf
);
1266 case QLC_BC_RESPONSE
:
1267 qlcnic_sriov_handle_bc_resp(&hdr
, vf
);
1272 static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov
*sriov
,
1273 struct qlcnic_vf_info
*vf
)
1275 struct qlcnic_adapter
*adapter
= vf
->adapter
;
1277 if (qlcnic_sriov_pf_check(adapter
))
1278 qlcnic_sriov_pf_handle_flr(sriov
, vf
);
1280 dev_err(&adapter
->pdev
->dev
,
1281 "Invalid event to VF. VF should not get FLR event\n");
1284 void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter
*adapter
, u32 event
)
1286 struct qlcnic_vf_info
*vf
;
1287 struct qlcnic_sriov
*sriov
;
1291 sriov
= adapter
->ahw
->sriov
;
1292 pci_func
= qlcnic_sriov_target_func_id(event
);
1293 index
= qlcnic_sriov_func_to_index(adapter
, pci_func
);
1298 vf
= &sriov
->vf_info
[index
];
1299 vf
->pci_func
= pci_func
;
1301 if (qlcnic_sriov_channel_free_check(event
))
1302 complete(&vf
->ch_free_cmpl
);
1304 if (qlcnic_sriov_flr_check(event
)) {
1305 qlcnic_sriov_handle_flr_event(sriov
, vf
);
1309 if (qlcnic_sriov_bc_msg_check(event
))
1310 qlcnic_sriov_handle_msg_event(sriov
, vf
);
1313 int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter
*adapter
, u8 enable
)
1315 struct qlcnic_cmd_args cmd
;
1318 if (!test_bit(__QLCNIC_SRIOV_ENABLE
, &adapter
->state
))
1321 if (qlcnic_alloc_mbx_args(&cmd
, adapter
, QLCNIC_CMD_BC_EVENT_SETUP
))
1325 cmd
.req
.arg
[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1327 err
= qlcnic_83xx_issue_cmd(adapter
, &cmd
);
1329 if (err
!= QLCNIC_RCODE_SUCCESS
) {
1330 dev_err(&adapter
->pdev
->dev
,
1331 "Failed to %s bc events, err=%d\n",
1332 (enable
? "enable" : "disable"), err
);
1335 qlcnic_free_mbx_args(&cmd
);
1339 static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter
*adapter
,
1340 struct qlcnic_bc_trans
*trans
)
1342 u8 max
= QLC_BC_CMD_MAX_RETRY_CNT
;
1345 state
= QLCRDX(adapter
->ahw
, QLC_83XX_IDC_DEV_STATE
);
1346 if (state
== QLC_83XX_IDC_DEV_READY
) {
1348 clear_bit(QLC_BC_VF_CHANNEL
, &trans
->vf
->state
);
1349 trans
->trans_state
= QLC_INIT
;
1350 if (++adapter
->fw_fail_cnt
> max
)
1359 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter
*adapter
,
1360 struct qlcnic_cmd_args
*cmd
)
1362 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1363 struct qlcnic_mailbox
*mbx
= ahw
->mailbox
;
1364 struct device
*dev
= &adapter
->pdev
->dev
;
1365 struct qlcnic_bc_trans
*trans
;
1367 u32 rsp_data
, opcode
, mbx_err_code
, rsp
;
1368 u16 seq
= ++adapter
->ahw
->sriov
->bc
.trans_counter
;
1369 u8 func
= ahw
->pci_func
;
1371 rsp
= qlcnic_sriov_alloc_bc_trans(&trans
);
1375 rsp
= qlcnic_sriov_prepare_bc_hdr(trans
, cmd
, seq
, QLC_BC_COMMAND
);
1377 goto cleanup_transaction
;
1380 if (!test_bit(QLC_83XX_MBX_READY
, &mbx
->status
)) {
1382 QLCDB(adapter
, DRV
, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1383 QLCNIC_MBX_RSP(cmd
->req
.arg
[0]), func
);
1387 err
= qlcnic_sriov_send_bc_cmd(adapter
, trans
, func
);
1389 dev_err(dev
, "MBX command 0x%x timed out for VF %d\n",
1390 (cmd
->req
.arg
[0] & 0xffff), func
);
1391 rsp
= QLCNIC_RCODE_TIMEOUT
;
1393 /* After adapter reset PF driver may take some time to
1394 * respond to VF's request. Retry request till maximum retries.
1396 if ((trans
->req_hdr
->cmd_op
== QLCNIC_BC_CMD_CHANNEL_INIT
) &&
1397 !qlcnic_sriov_retry_bc_cmd(adapter
, trans
))
1403 rsp_data
= cmd
->rsp
.arg
[0];
1404 mbx_err_code
= QLCNIC_MBX_STATUS(rsp_data
);
1405 opcode
= QLCNIC_MBX_RSP(cmd
->req
.arg
[0]);
1407 if ((mbx_err_code
== QLCNIC_MBX_RSP_OK
) ||
1408 (mbx_err_code
== QLCNIC_MBX_PORT_RSP_OK
)) {
1409 rsp
= QLCNIC_RCODE_SUCCESS
;
1415 "MBX command 0x%x failed with err:0x%x for VF %d\n",
1416 opcode
, mbx_err_code
, func
);
1420 if (rsp
== QLCNIC_RCODE_TIMEOUT
) {
1421 ahw
->reset_context
= 1;
1422 adapter
->need_fw_reset
= 1;
1423 clear_bit(QLC_83XX_MBX_READY
, &mbx
->status
);
1426 cleanup_transaction
:
1427 qlcnic_sriov_cleanup_transaction(trans
);
1431 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter
*adapter
, u8 cmd_op
)
1433 struct qlcnic_cmd_args cmd
;
1434 struct qlcnic_vf_info
*vf
= &adapter
->ahw
->sriov
->vf_info
[0];
1437 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd
, cmd_op
))
1440 ret
= qlcnic_issue_cmd(adapter
, &cmd
);
1442 dev_err(&adapter
->pdev
->dev
,
1443 "Failed bc channel %s %d\n", cmd_op
? "term" : "init",
1448 cmd_op
= (cmd
.rsp
.arg
[0] & 0xff);
1449 if (cmd
.rsp
.arg
[0] >> 25 == 2)
1451 if (cmd_op
== QLCNIC_BC_CMD_CHANNEL_INIT
)
1452 set_bit(QLC_BC_VF_STATE
, &vf
->state
);
1454 clear_bit(QLC_BC_VF_STATE
, &vf
->state
);
1457 qlcnic_free_mbx_args(&cmd
);
1461 static void qlcnic_vf_add_mc_list(struct net_device
*netdev
)
1463 struct qlcnic_adapter
*adapter
= netdev_priv(netdev
);
1464 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
1465 struct qlcnic_mac_vlan_list
*cur
;
1466 struct list_head
*head
, tmp_list
;
1467 struct qlcnic_vf_info
*vf
;
1471 static const u8 bcast_addr
[ETH_ALEN
] = {
1472 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1475 vf
= &adapter
->ahw
->sriov
->vf_info
[0];
1476 INIT_LIST_HEAD(&tmp_list
);
1477 head
= &adapter
->vf_mc_list
;
1478 netif_addr_lock_bh(netdev
);
1480 while (!list_empty(head
)) {
1481 cur
= list_entry(head
->next
, struct qlcnic_mac_vlan_list
, list
);
1482 list_move(&cur
->list
, &tmp_list
);
1485 netif_addr_unlock_bh(netdev
);
1487 while (!list_empty(&tmp_list
)) {
1488 cur
= list_entry((&tmp_list
)->next
,
1489 struct qlcnic_mac_vlan_list
, list
);
1490 if (!qlcnic_sriov_check_any_vlan(vf
)) {
1491 qlcnic_nic_add_mac(adapter
, bcast_addr
, 0);
1492 qlcnic_nic_add_mac(adapter
, cur
->mac_addr
, 0);
1494 mutex_lock(&vf
->vlan_list_lock
);
1495 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
1496 vlan_id
= vf
->sriov_vlans
[i
];
1498 qlcnic_nic_add_mac(adapter
, bcast_addr
,
1500 qlcnic_nic_add_mac(adapter
,
1505 mutex_unlock(&vf
->vlan_list_lock
);
1506 if (qlcnic_84xx_check(adapter
)) {
1507 qlcnic_nic_add_mac(adapter
, bcast_addr
, 0);
1508 qlcnic_nic_add_mac(adapter
, cur
->mac_addr
, 0);
1511 list_del(&cur
->list
);
1516 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel
*bc
)
1518 struct list_head
*head
= &bc
->async_list
;
1519 struct qlcnic_async_work_list
*entry
;
1521 while (!list_empty(head
)) {
1522 entry
= list_entry(head
->next
, struct qlcnic_async_work_list
,
1524 cancel_work_sync(&entry
->work
);
1525 list_del(&entry
->list
);
1530 static void qlcnic_sriov_vf_set_multi(struct net_device
*netdev
)
1532 struct qlcnic_adapter
*adapter
= netdev_priv(netdev
);
1533 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1534 u32 mode
= VPORT_MISS_MODE_DROP
;
1536 if (!test_bit(__QLCNIC_FW_ATTACHED
, &adapter
->state
))
1539 if (netdev
->flags
& IFF_PROMISC
) {
1540 if (!(adapter
->flags
& QLCNIC_PROMISC_DISABLED
))
1541 mode
= VPORT_MISS_MODE_ACCEPT_ALL
;
1542 } else if ((netdev
->flags
& IFF_ALLMULTI
) ||
1543 (netdev_mc_count(netdev
) > ahw
->max_mc_count
)) {
1544 mode
= VPORT_MISS_MODE_ACCEPT_MULTI
;
1547 if (qlcnic_sriov_vf_check(adapter
))
1548 qlcnic_vf_add_mc_list(netdev
);
1550 qlcnic_nic_set_promisc(adapter
, mode
);
1553 static void qlcnic_sriov_handle_async_multi(struct work_struct
*work
)
1555 struct qlcnic_async_work_list
*entry
;
1556 struct net_device
*netdev
;
1558 entry
= container_of(work
, struct qlcnic_async_work_list
, work
);
1559 netdev
= (struct net_device
*)entry
->ptr
;
1561 qlcnic_sriov_vf_set_multi(netdev
);
1565 static struct qlcnic_async_work_list
*
1566 qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel
*bc
)
1568 struct list_head
*node
;
1569 struct qlcnic_async_work_list
*entry
= NULL
;
1572 list_for_each(node
, &bc
->async_list
) {
1573 entry
= list_entry(node
, struct qlcnic_async_work_list
, list
);
1574 if (!work_pending(&entry
->work
)) {
1581 entry
= kzalloc(sizeof(struct qlcnic_async_work_list
),
1585 list_add_tail(&entry
->list
, &bc
->async_list
);
1591 static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel
*bc
,
1592 work_func_t func
, void *data
)
1594 struct qlcnic_async_work_list
*entry
= NULL
;
1596 entry
= qlcnic_sriov_get_free_node_async_work(bc
);
1601 INIT_WORK(&entry
->work
, func
);
1602 queue_work(bc
->bc_async_wq
, &entry
->work
);
1605 void qlcnic_sriov_vf_schedule_multi(struct net_device
*netdev
)
1608 struct qlcnic_adapter
*adapter
= netdev_priv(netdev
);
1609 struct qlcnic_back_channel
*bc
= &adapter
->ahw
->sriov
->bc
;
1611 if (adapter
->need_fw_reset
)
1614 qlcnic_sriov_schedule_bc_async_work(bc
, qlcnic_sriov_handle_async_multi
,
1618 static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter
*adapter
)
1622 adapter
->need_fw_reset
= 0;
1623 qlcnic_83xx_reinit_mbx_work(adapter
->ahw
->mailbox
);
1624 qlcnic_83xx_enable_mbx_interrupt(adapter
);
1626 err
= qlcnic_sriov_cfg_bc_intr(adapter
, 1);
1630 err
= qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_INIT
);
1632 goto err_out_cleanup_bc_intr
;
1634 err
= qlcnic_sriov_vf_init_driver(adapter
);
1636 goto err_out_term_channel
;
1640 err_out_term_channel
:
1641 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
1643 err_out_cleanup_bc_intr
:
1644 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
1648 static void qlcnic_sriov_vf_attach(struct qlcnic_adapter
*adapter
)
1650 struct net_device
*netdev
= adapter
->netdev
;
1652 if (netif_running(netdev
)) {
1653 if (!qlcnic_up(adapter
, netdev
))
1654 qlcnic_restore_indev_addr(netdev
, NETDEV_UP
);
1657 netif_device_attach(netdev
);
1660 static void qlcnic_sriov_vf_detach(struct qlcnic_adapter
*adapter
)
1662 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1663 struct qlcnic_intrpt_config
*intr_tbl
= ahw
->intr_tbl
;
1664 struct net_device
*netdev
= adapter
->netdev
;
1665 u8 i
, max_ints
= ahw
->num_msix
- 1;
1667 netif_device_detach(netdev
);
1668 qlcnic_83xx_detach_mailbox_work(adapter
);
1669 qlcnic_83xx_disable_mbx_intr(adapter
);
1671 if (netif_running(netdev
))
1672 qlcnic_down(adapter
, netdev
);
1674 for (i
= 0; i
< max_ints
; i
++) {
1676 intr_tbl
[i
].enabled
= 0;
1677 intr_tbl
[i
].src
= 0;
1679 ahw
->reset_context
= 0;
1682 static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter
*adapter
)
1684 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1685 struct device
*dev
= &adapter
->pdev
->dev
;
1686 struct qlc_83xx_idc
*idc
= &ahw
->idc
;
1687 u8 func
= ahw
->pci_func
;
1690 if ((idc
->prev_state
== QLC_83XX_IDC_DEV_NEED_RESET
) ||
1691 (idc
->prev_state
== QLC_83XX_IDC_DEV_INIT
)) {
1692 if (!qlcnic_sriov_vf_reinit_driver(adapter
)) {
1693 qlcnic_sriov_vf_attach(adapter
);
1694 adapter
->fw_fail_cnt
= 0;
1696 "%s: Reinitialization of VF 0x%x done after FW reset\n",
1700 "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1702 state
= QLCRDX(ahw
, QLC_83XX_IDC_DEV_STATE
);
1703 dev_info(dev
, "Current state 0x%x after FW reset\n",
1711 static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter
*adapter
)
1713 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1714 struct qlcnic_mailbox
*mbx
= ahw
->mailbox
;
1715 struct device
*dev
= &adapter
->pdev
->dev
;
1716 struct qlc_83xx_idc
*idc
= &ahw
->idc
;
1717 u8 func
= ahw
->pci_func
;
1720 adapter
->reset_ctx_cnt
++;
1722 /* Skip the context reset and check if FW is hung */
1723 if (adapter
->reset_ctx_cnt
< 3) {
1724 adapter
->need_fw_reset
= 1;
1725 clear_bit(QLC_83XX_MBX_READY
, &mbx
->status
);
1727 "Resetting context, wait here to check if FW is in failed state\n");
1731 /* Check if number of resets exceed the threshold.
1732 * If it exceeds the threshold just fail the VF.
1734 if (adapter
->reset_ctx_cnt
> QLC_83XX_VF_RESET_FAIL_THRESH
) {
1735 clear_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
);
1736 adapter
->tx_timeo_cnt
= 0;
1737 adapter
->fw_fail_cnt
= 0;
1738 adapter
->reset_ctx_cnt
= 0;
1739 qlcnic_sriov_vf_detach(adapter
);
1741 "Device context resets have exceeded the threshold, device interface will be shutdown\n");
1745 dev_info(dev
, "Resetting context of VF 0x%x\n", func
);
1746 dev_info(dev
, "%s: Context reset count %d for VF 0x%x\n",
1747 __func__
, adapter
->reset_ctx_cnt
, func
);
1748 set_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1749 adapter
->need_fw_reset
= 1;
1750 clear_bit(QLC_83XX_MBX_READY
, &mbx
->status
);
1751 qlcnic_sriov_vf_detach(adapter
);
1752 adapter
->need_fw_reset
= 0;
1754 if (!qlcnic_sriov_vf_reinit_driver(adapter
)) {
1755 qlcnic_sriov_vf_attach(adapter
);
1756 adapter
->tx_timeo_cnt
= 0;
1757 adapter
->reset_ctx_cnt
= 0;
1758 adapter
->fw_fail_cnt
= 0;
1759 dev_info(dev
, "Done resetting context for VF 0x%x\n", func
);
1761 dev_err(dev
, "%s: Reinitialization of VF 0x%x failed\n",
1763 state
= QLCRDX(ahw
, QLC_83XX_IDC_DEV_STATE
);
1764 dev_info(dev
, "%s: Current state 0x%x\n", __func__
, state
);
1770 static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter
*adapter
)
1772 struct qlcnic_hardware_context
*ahw
= adapter
->ahw
;
1775 if (ahw
->idc
.prev_state
!= QLC_83XX_IDC_DEV_READY
)
1776 ret
= qlcnic_sriov_vf_handle_dev_ready(adapter
);
1777 else if (ahw
->reset_context
)
1778 ret
= qlcnic_sriov_vf_handle_context_reset(adapter
);
1780 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1784 static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter
*adapter
)
1786 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
1788 dev_err(&adapter
->pdev
->dev
, "Device is in failed state\n");
1789 if (idc
->prev_state
== QLC_83XX_IDC_DEV_READY
)
1790 qlcnic_sriov_vf_detach(adapter
);
1792 clear_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
);
1793 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1798 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter
*adapter
)
1800 struct qlcnic_mailbox
*mbx
= adapter
->ahw
->mailbox
;
1801 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
1803 dev_info(&adapter
->pdev
->dev
, "Device is in quiescent state\n");
1804 if (idc
->prev_state
== QLC_83XX_IDC_DEV_READY
) {
1805 set_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1806 adapter
->tx_timeo_cnt
= 0;
1807 adapter
->reset_ctx_cnt
= 0;
1808 clear_bit(QLC_83XX_MBX_READY
, &mbx
->status
);
1809 qlcnic_sriov_vf_detach(adapter
);
1815 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter
*adapter
)
1817 struct qlcnic_mailbox
*mbx
= adapter
->ahw
->mailbox
;
1818 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
1819 u8 func
= adapter
->ahw
->pci_func
;
1821 if (idc
->prev_state
== QLC_83XX_IDC_DEV_READY
) {
1822 dev_err(&adapter
->pdev
->dev
,
1823 "Firmware hang detected by VF 0x%x\n", func
);
1824 set_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1825 adapter
->tx_timeo_cnt
= 0;
1826 adapter
->reset_ctx_cnt
= 0;
1827 clear_bit(QLC_83XX_MBX_READY
, &mbx
->status
);
1828 qlcnic_sriov_vf_detach(adapter
);
1833 static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter
*adapter
)
1835 dev_err(&adapter
->pdev
->dev
, "%s: Device in unknown state\n", __func__
);
1839 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct
*work
)
1841 struct qlcnic_adapter
*adapter
;
1842 struct qlc_83xx_idc
*idc
;
1845 adapter
= container_of(work
, struct qlcnic_adapter
, fw_work
.work
);
1846 idc
= &adapter
->ahw
->idc
;
1847 idc
->curr_state
= QLCRDX(adapter
->ahw
, QLC_83XX_IDC_DEV_STATE
);
1849 switch (idc
->curr_state
) {
1850 case QLC_83XX_IDC_DEV_READY
:
1851 ret
= qlcnic_sriov_vf_idc_ready_state(adapter
);
1853 case QLC_83XX_IDC_DEV_NEED_RESET
:
1854 case QLC_83XX_IDC_DEV_INIT
:
1855 ret
= qlcnic_sriov_vf_idc_init_reset_state(adapter
);
1857 case QLC_83XX_IDC_DEV_NEED_QUISCENT
:
1858 ret
= qlcnic_sriov_vf_idc_need_quiescent_state(adapter
);
1860 case QLC_83XX_IDC_DEV_FAILED
:
1861 ret
= qlcnic_sriov_vf_idc_failed_state(adapter
);
1863 case QLC_83XX_IDC_DEV_QUISCENT
:
1866 ret
= qlcnic_sriov_vf_idc_unknown_state(adapter
);
1869 idc
->prev_state
= idc
->curr_state
;
1870 if (!ret
&& test_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
))
1871 qlcnic_schedule_work(adapter
, qlcnic_sriov_vf_poll_dev_state
,
1875 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter
*adapter
)
1877 while (test_and_set_bit(__QLCNIC_RESETTING
, &adapter
->state
))
1880 clear_bit(QLC_83XX_MODULE_LOADED
, &adapter
->ahw
->idc
.status
);
1881 clear_bit(__QLCNIC_RESETTING
, &adapter
->state
);
1882 cancel_delayed_work_sync(&adapter
->fw_work
);
1885 static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov
*sriov
,
1886 struct qlcnic_vf_info
*vf
, u16 vlan_id
)
1888 int i
, err
= -EINVAL
;
1890 if (!vf
->sriov_vlans
)
1893 mutex_lock(&vf
->vlan_list_lock
);
1895 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
1896 if (vf
->sriov_vlans
[i
] == vlan_id
) {
1902 mutex_unlock(&vf
->vlan_list_lock
);
1906 static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov
*sriov
,
1907 struct qlcnic_vf_info
*vf
)
1911 mutex_lock(&vf
->vlan_list_lock
);
1913 if (vf
->num_vlan
>= sriov
->num_allowed_vlans
)
1916 mutex_unlock(&vf
->vlan_list_lock
);
1920 static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter
*adapter
,
1923 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
1924 struct qlcnic_vf_info
*vf
;
1929 vf
= &adapter
->ahw
->sriov
->vf_info
[0];
1930 vlan_exist
= qlcnic_sriov_check_any_vlan(vf
);
1931 if (sriov
->vlan_mode
!= QLC_GUEST_VLAN_MODE
)
1935 if (qlcnic_83xx_vf_check(adapter
) && vlan_exist
)
1938 if (qlcnic_sriov_validate_num_vlans(sriov
, vf
))
1941 if (sriov
->any_vlan
) {
1942 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
1943 if (sriov
->allowed_vlans
[i
] == vid
)
1951 if (!vlan_exist
|| qlcnic_sriov_check_vlan_id(sriov
, vf
, vid
))
1958 static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info
*vf
, u16 vlan_id
,
1959 enum qlcnic_vlan_operations opcode
)
1961 struct qlcnic_adapter
*adapter
= vf
->adapter
;
1962 struct qlcnic_sriov
*sriov
;
1964 sriov
= adapter
->ahw
->sriov
;
1966 if (!vf
->sriov_vlans
)
1969 mutex_lock(&vf
->vlan_list_lock
);
1973 qlcnic_sriov_add_vlan_id(sriov
, vf
, vlan_id
);
1975 case QLC_VLAN_DELETE
:
1976 qlcnic_sriov_del_vlan_id(sriov
, vf
, vlan_id
);
1979 netdev_err(adapter
->netdev
, "Invalid VLAN operation\n");
1982 mutex_unlock(&vf
->vlan_list_lock
);
1986 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter
*adapter
,
1989 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
1990 struct qlcnic_vf_info
*vf
;
1991 struct qlcnic_cmd_args cmd
;
1997 vf
= &adapter
->ahw
->sriov
->vf_info
[0];
1998 ret
= qlcnic_sriov_validate_vlan_cfg(adapter
, vid
, enable
);
2002 ret
= qlcnic_sriov_alloc_bc_mbx_args(&cmd
,
2003 QLCNIC_BC_CMD_CFG_GUEST_VLAN
);
2007 cmd
.req
.arg
[1] = (enable
& 1) | vid
<< 16;
2009 qlcnic_sriov_cleanup_async_list(&sriov
->bc
);
2010 ret
= qlcnic_issue_cmd(adapter
, &cmd
);
2012 dev_err(&adapter
->pdev
->dev
,
2013 "Failed to configure guest VLAN, err=%d\n", ret
);
2015 qlcnic_free_mac_list(adapter
);
2018 qlcnic_sriov_vlan_operation(vf
, vid
, QLC_VLAN_ADD
);
2020 qlcnic_sriov_vlan_operation(vf
, vid
, QLC_VLAN_DELETE
);
2022 qlcnic_set_multi(adapter
->netdev
);
2025 qlcnic_free_mbx_args(&cmd
);
2029 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter
*adapter
)
2031 struct list_head
*head
= &adapter
->mac_list
;
2032 struct qlcnic_mac_vlan_list
*cur
;
2034 while (!list_empty(head
)) {
2035 cur
= list_entry(head
->next
, struct qlcnic_mac_vlan_list
, list
);
2036 qlcnic_sre_macaddr_change(adapter
, cur
->mac_addr
, cur
->vlan_id
,
2038 list_del(&cur
->list
);
2044 static int qlcnic_sriov_vf_shutdown(struct pci_dev
*pdev
)
2046 struct qlcnic_adapter
*adapter
= pci_get_drvdata(pdev
);
2047 struct net_device
*netdev
= adapter
->netdev
;
2050 netif_device_detach(netdev
);
2051 qlcnic_cancel_idc_work(adapter
);
2053 if (netif_running(netdev
))
2054 qlcnic_down(adapter
, netdev
);
2056 qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_TERM
);
2057 qlcnic_sriov_cfg_bc_intr(adapter
, 0);
2058 qlcnic_83xx_disable_mbx_intr(adapter
);
2059 cancel_delayed_work_sync(&adapter
->idc_aen_work
);
2061 retval
= pci_save_state(pdev
);
2068 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter
*adapter
)
2070 struct qlc_83xx_idc
*idc
= &adapter
->ahw
->idc
;
2071 struct net_device
*netdev
= adapter
->netdev
;
2074 set_bit(QLC_83XX_MODULE_LOADED
, &idc
->status
);
2075 qlcnic_83xx_enable_mbx_interrupt(adapter
);
2076 err
= qlcnic_sriov_cfg_bc_intr(adapter
, 1);
2080 err
= qlcnic_sriov_channel_cfg_cmd(adapter
, QLCNIC_BC_CMD_CHANNEL_INIT
);
2082 if (netif_running(netdev
)) {
2083 err
= qlcnic_up(adapter
, netdev
);
2085 qlcnic_restore_indev_addr(netdev
, NETDEV_UP
);
2089 netif_device_attach(netdev
);
2090 qlcnic_schedule_work(adapter
, qlcnic_sriov_vf_poll_dev_state
,
2095 void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter
*adapter
)
2097 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
2098 struct qlcnic_vf_info
*vf
;
2101 for (i
= 0; i
< sriov
->num_vfs
; i
++) {
2102 vf
= &sriov
->vf_info
[i
];
2103 vf
->sriov_vlans
= kcalloc(sriov
->num_allowed_vlans
,
2104 sizeof(*vf
->sriov_vlans
), GFP_KERNEL
);
2108 void qlcnic_sriov_free_vlans(struct qlcnic_adapter
*adapter
)
2110 struct qlcnic_sriov
*sriov
= adapter
->ahw
->sriov
;
2111 struct qlcnic_vf_info
*vf
;
2114 for (i
= 0; i
< sriov
->num_vfs
; i
++) {
2115 vf
= &sriov
->vf_info
[i
];
2116 kfree(vf
->sriov_vlans
);
2117 vf
->sriov_vlans
= NULL
;
2121 void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov
*sriov
,
2122 struct qlcnic_vf_info
*vf
, u16 vlan_id
)
2126 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
2127 if (!vf
->sriov_vlans
[i
]) {
2128 vf
->sriov_vlans
[i
] = vlan_id
;
2135 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov
*sriov
,
2136 struct qlcnic_vf_info
*vf
, u16 vlan_id
)
2140 for (i
= 0; i
< sriov
->num_allowed_vlans
; i
++) {
2141 if (vf
->sriov_vlans
[i
] == vlan_id
) {
2142 vf
->sriov_vlans
[i
] = 0;
2149 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info
*vf
)
2153 mutex_lock(&vf
->vlan_list_lock
);
2158 mutex_unlock(&vf
->vlan_list_lock
);