/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2010 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic.h"
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
	u32 rsp;
	int timeout = 0;

	do {
		/* give at least 1ms for firmware to respond */
		mdelay(1);

		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
			return QLCNIC_CDRP_RSP_TIMEOUT;

		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
	} while (!QLCNIC_CDRP_IS_RSP(rsp));

	return rsp;
}
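/*
 * qlcnic_issue_cmd() drives the firmware CDRP mailbox: it takes the
 * inter-function API lock, writes the signature, the three arguments and
 * the encoded command into the CRB registers, then polls the CDRP register
 * for the firmware response.  A poll timeout is reported as
 * QLCNIC_RCODE_TIMEOUT; an explicit failure response is translated into the
 * error code the firmware left in QLCNIC_ARG1_CRB_OFFSET.
 */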
static u32
qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
{
	u32 rsp;
	u32 signature;
	u32 rcode = QLCNIC_RCODE_SUCCESS;
	struct pci_dev *pdev = adapter->pdev;

	signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);

	/* Acquire semaphore before accessing CRB */
	if (qlcnic_api_lock(adapter))
		return QLCNIC_RCODE_TIMEOUT;

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
	QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
	QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));

	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "card response timeout.\n");
		rcode = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
		dev_err(&pdev->dev, "failed card response code:0x%x\n",
				rcode);
	}

	/* Release semaphore */
	qlcnic_api_unlock(adapter);

	return rcode;
}
static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
{
	uint64_t sum = 0;
	int count = temp_size / sizeof(uint32_t);

	while (count-- > 0)
		sum += *temp_buffer++;

	while (sum >> 32)
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);

	return ~sum;
}
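/*
 * Fetch the firmware minidump template: query the template size
 * (QLCNIC_CDRP_CMD_TEMP_SIZE), DMA the template header into a coherent
 * buffer (QLCNIC_CDRP_CMD_GET_TEMP_HDR), validate its checksum and copy it,
 * converted to host byte order, into a vzalloc'd buffer hung off
 * adapter->ahw->fw_dump.
 */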
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
	int err, i;
	u16 temp_size;
	void *tmp_addr;
	u32 version, csum, *template, *tmp_buf;
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
	dma_addr_t tmp_addr_t = 0;

	ahw = adapter->ahw;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			0,
			0,
			0,
			QLCNIC_CDRP_CMD_TEMP_SIZE);
	if (err != QLCNIC_RCODE_SUCCESS) {
		err = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
		dev_info(&adapter->pdev->dev,
			"Can't get template size %d\n", err);
		return -EIO;
	}
	version = QLCRD32(adapter, QLCNIC_ARG3_CRB_OFFSET);
	temp_size = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
	if (!temp_size)
		return -EIO;

	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
			&tmp_addr_t, GFP_KERNEL);
	if (!tmp_addr) {
		dev_err(&adapter->pdev->dev,
			"Can't get memory for FW dump template\n");
		return -ENOMEM;
	}
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			LSD(tmp_addr_t),
			MSD(tmp_addr_t),
			temp_size,
			QLCNIC_CDRP_CMD_GET_TEMP_HDR);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to get mini dump template header %d\n", err);
		err = -EIO;
		goto error;
	}
	tmp_tmpl = tmp_addr;
	csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
	if (csum) {
		dev_err(&adapter->pdev->dev,
			"Template header checksum validation failed\n");
		err = -EIO;
		goto error;
	}
	ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
	if (!ahw->fw_dump.tmpl_hdr) {
		err = -EIO;
		goto error;
	}
	tmp_buf = tmp_addr;
	template = (u32 *) ahw->fw_dump.tmpl_hdr;
	for (i = 0; i < temp_size / sizeof(u32); i++)
		*template++ = __le32_to_cpu(*tmp_buf++);

	tmpl_hdr = ahw->fw_dump.tmpl_hdr;
	tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
	ahw->fw_dump.enable = 1;
error:
	dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
	return err;
}
int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
		if (qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			recv_ctx->context_id,
			mtu,
			0,
			QLCNIC_CDRP_CMD_SET_MTU)) {

			dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
			return -EIO;
		}
	}

	return 0;
}
static int
qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hostrq_rx_ctx *prq;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	u8 i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	int err;
	void *addr;

	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
						nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
						nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
			&hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
			&cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
						| QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	prq->valid_field_offset = offsetof(struct qlcnic_hostrq_rx_ctx,
							msix_handler);
	prq->txrx_sds_binding = nsds_rings - 1;

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {

		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {

		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));

		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			(u32)(phys_addr >> 32),
			(u32)(phys_addr & 0xffffffff),
			rq_size,
			QLCNIC_CDRP_CMD_CREATE_RX_CTX);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware%d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
		sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
		cardrsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);

	return err;
}
static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			recv_ctx->context_id,
			QLCNIC_DESTROY_CTX_RESET,
			0,
			QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {

		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");
	}

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
}
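/*
 * The transmit context follows the same request/response pattern, but with
 * a single CDS (command descriptor) ring.  The firmware reply provides the
 * CRB offset of the command producer register and the context id that is
 * later passed to the destroy command.
 */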
static int
qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hostrq_tx_ctx *prq;
	struct qlcnic_hostrq_cds_ring *prq_cds;
	struct qlcnic_cardrsp_tx_ctx *prsp;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u32 temp;
	int err;
	u64 phys_addr;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;

	/* reset host resources */
	tx_ring->producer = 0;
	tx_ring->sw_consumer = 0;
	*(tx_ring->hw_consumer) = 0;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
			&rq_phys_addr, GFP_KERNEL);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
			&rsp_phys_addr, GFP_KERNEL);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
					QLCNIC_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			(u32)(phys_addr >> 32),
			((u32)phys_addr & 0xffffffff),
			rq_size,
			QLCNIC_CDRP_CMD_CREATE_TX_CTX);

	if (err == QLCNIC_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;

		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to create tx ctx in firmware%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
		rsp_phys_addr);

out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);

	return err;
}
static void
qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
{
	if (qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			adapter->tx_context_id,
			QLCNIC_DESTROY_CTX_RESET,
			0,
			QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {

		dev_err(&adapter->pdev->dev,
			"Failed to destroy tx ctx in firmware\n");
	}
}
int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
	return qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			config,
			0,
			0,
			QLCNIC_CDRP_CMD_CONFIG_PORT);
}
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
	void *addr;
	int err = 0;
	int ring;
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;

	struct pci_dev *pdev = adapter->pdev;

	recv_ctx = adapter->recv_ctx;
	tx_ring = adapter->tx_ring;

	tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
		sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
	if (tx_ring->hw_consumer == NULL) {
		dev_err(&pdev->dev, "failed to allocate tx consumer\n");
		return -ENOMEM;
	}

	/* cmd descriptor ring */
	addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr, GFP_KERNEL);

	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
		err = -ENOMEM;
		goto err_out_free;
	}

	tx_ring->desc_head = addr;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = dma_alloc_coherent(&adapter->pdev->dev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate rds ring [%d]\n", ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = dma_alloc_coherent(&adapter->pdev->dev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"failed to allocate sds ring [%d]\n", ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;
	}

	return 0;

err_out_free:
	qlcnic_free_hw_resources(adapter);
	return err;
}
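/*
 * qlcnic_fw_create_ctx()/qlcnic_fw_destroy_ctx() pair the rx and tx context
 * commands and track attachment through the __QLCNIC_FW_ATTACHED bit, so a
 * failed tx-context creation tears the freshly created rx context back down.
 */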
int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
{
	int err;

	if (adapter->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(adapter->pdev);
		adapter->flags &= ~QLCNIC_NEED_FLR;
	}

	err = qlcnic_fw_cmd_create_rx_ctx(adapter);
	if (err)
		return err;

	err = qlcnic_fw_cmd_create_tx_ctx(adapter);
	if (err) {
		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
		return err;
	}

	set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
	return 0;
}
void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
		qlcnic_fw_cmd_destroy_rx_ctx(adapter);
		qlcnic_fw_cmd_destroy_tx_ctx(adapter);

		/* Allow dma queues to drain after context reset */
		msleep(20);
	}
}
void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = adapter->recv_ctx;

	tx_ring = adapter->tx_ring;
	if (tx_ring->hw_consumer != NULL) {
		dma_free_coherent(&adapter->pdev->dev,
				sizeof(u32),
				tx_ring->hw_consumer,
				tx_ring->hw_cons_phys_addr);
		tx_ring->hw_consumer = NULL;
	}

	if (tx_ring->desc_head != NULL) {
		dma_free_coherent(&adapter->pdev->dev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					STATUS_DESC_RINGSIZE(sds_ring),
					sds_ring->desc_head,
					sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}
/* Get MAC address of a NIC partition */
int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
{
	int err;
	u32 arg1;

	arg1 = adapter->ahw->pci_func | BIT_8;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			arg1,
			0,
			0,
			QLCNIC_CDRP_CMD_MAC_ADDRESS);

	if (err == QLCNIC_RCODE_SUCCESS)
		qlcnic_fetch_mac(adapter, QLCNIC_ARG1_CRB_OFFSET,
				QLCNIC_ARG2_CRB_OFFSET, 0, mac);
	else {
		dev_err(&adapter->pdev->dev,
			"Failed to get mac address%d\n", err);
		err = -EIO;
	}

	return err;
}
/* Get info of a NIC partition */
int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
				struct qlcnic_info *npar_info, u8 func_id)
{
	int err;
	dma_addr_t nic_dma_t;
	struct qlcnic_info *nic_info;
	void *nic_info_addr;
	size_t nic_size = sizeof(struct qlcnic_info);

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
				&nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;
	memset(nic_info_addr, 0, nic_size);

	nic_info = nic_info_addr;
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			MSD(nic_dma_t),
			LSD(nic_dma_t),
			(func_id << 16 | nic_size),
			QLCNIC_CDRP_CMD_GET_NIC_INFO);

	if (err == QLCNIC_RCODE_SUCCESS) {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);

		dev_info(&adapter->pdev->dev,
			"phy port: %d switch_mode: %d,\n"
			"\tmax_tx_q: %d max_rx_q: %d min_tx_bw: 0x%x,\n"
			"\tmax_tx_bw: 0x%x max_mtu: 0x%x, capabilities: 0x%x\n",
			npar_info->phys_port, npar_info->switch_mode,
			npar_info->max_tx_ques, npar_info->max_rx_ques,
			npar_info->min_tx_bw, npar_info->max_tx_bw,
			npar_info->max_mtu, npar_info->capabilities);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
		nic_dma_t);
	return err;
}
/* Configure a NIC partition */
int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_info *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info);

	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
			&nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	memset(nic_info_addr, 0, nic_size);
	nic_info = nic_info_addr;

	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	nic_info->max_mac_filters = nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			MSD(nic_dma_t),
			LSD(nic_dma_t),
			((nic->pci_func << 16) | nic_size),
			QLCNIC_CDRP_CMD_SET_NIC_INFO);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info%d\n", err);
		err = -EIO;
	}

	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
		nic_dma_t);
	return err;
}
753 int qlcnic_get_pci_info(struct qlcnic_adapter
*adapter
,
754 struct qlcnic_pci_info
*pci_info
)
757 dma_addr_t pci_info_dma_t
;
758 struct qlcnic_pci_info
*npar
;
760 size_t npar_size
= sizeof(struct qlcnic_pci_info
);
761 size_t pci_size
= npar_size
* QLCNIC_MAX_PCI_FUNC
;
763 pci_info_addr
= dma_alloc_coherent(&adapter
->pdev
->dev
, pci_size
,
764 &pci_info_dma_t
, GFP_KERNEL
);
767 memset(pci_info_addr
, 0, pci_size
);
769 npar
= pci_info_addr
;
770 err
= qlcnic_issue_cmd(adapter
,
771 adapter
->ahw
->pci_func
,
772 adapter
->fw_hal_version
,
776 QLCNIC_CDRP_CMD_GET_PCI_INFO
);
778 if (err
== QLCNIC_RCODE_SUCCESS
) {
779 for (i
= 0; i
< QLCNIC_MAX_PCI_FUNC
; i
++, npar
++, pci_info
++) {
780 pci_info
->id
= le16_to_cpu(npar
->id
);
781 pci_info
->active
= le16_to_cpu(npar
->active
);
782 pci_info
->type
= le16_to_cpu(npar
->type
);
783 pci_info
->default_port
=
784 le16_to_cpu(npar
->default_port
);
785 pci_info
->tx_min_bw
=
786 le16_to_cpu(npar
->tx_min_bw
);
787 pci_info
->tx_max_bw
=
788 le16_to_cpu(npar
->tx_max_bw
);
789 memcpy(pci_info
->mac
, npar
->mac
, ETH_ALEN
);
792 dev_err(&adapter
->pdev
->dev
,
793 "Failed to get PCI Info%d\n", err
);
797 dma_free_coherent(&adapter
->pdev
->dev
, pci_size
, pci_info_addr
,
/* Configure eSwitch for port mirroring */
int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
		u8 enable_mirroring, u8 pci_func)
{
	int err = -EIO;
	u32 arg1;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC ||
		!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
		return err;

	arg1 = id | (enable_mirroring ? BIT_4 : 0);
	arg1 |= pci_func << 8;

	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			arg1,
			0,
			0,
			QLCNIC_CDRP_CMD_SET_PORTMIRRORING);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure port mirroring%d on eswitch:%d\n",
			pci_func, id);
	} else {
		dev_info(&adapter->pdev->dev,
			"Configured eSwitch %d for port mirroring:%d\n",
			id, pci_func);
	}

	return err;
}
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	size_t stats_size = sizeof(struct __qlcnic_esw_statistics);
	struct __qlcnic_esw_statistics *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
		func != adapter->ahw->pci_func) {
		dev_err(&adapter->pdev->dev,
			"Not privileged to query stats for func=%d", func);
		return -EIO;
	}

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
			&stats_dma_t, GFP_KERNEL);
	if (!stats_addr) {
		dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}
	memset(stats_addr, 0, stats_size);

	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			arg1,
			MSD(stats_dma_t),
			LSD(stats_dma_t),
			QLCNIC_CDRP_CMD_GET_ESWITCH_STATS);

	if (!err) {
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
				le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
				le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
		stats_dma_t);
	return err;
}
int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
		const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {

	struct __qlcnic_esw_statistics port_stats;
	u8 i;
	int ret = -EIO;

	if (esw_stats == NULL)
		return -ENOMEM;
	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;
	if (adapter->npars == NULL)
		return -EIO;

	memset(esw_stats, 0, sizeof(u64));
	esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL;
	esw_stats->context_id = eswitch;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		if (adapter->npars[i].phy_port != eswitch)
			continue;

		memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
		if (qlcnic_get_port_stats(adapter, i, rx_tx, &port_stats))
			continue;

		esw_stats->size = port_stats.size;
		esw_stats->version = port_stats.version;
		QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
						port_stats.unicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
						port_stats.multicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
						port_stats.broadcast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
						port_stats.dropped_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->errors,
						port_stats.errors);
		QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
						port_stats.local_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
						port_stats.numbytes);

		ret = 0;
	}

	return ret;
}
*adapter
, const u8 func_esw
,
951 const u8 port
, const u8 rx_tx
)
956 if (adapter
->op_mode
!= QLCNIC_MGMT_FUNC
)
959 if (func_esw
== QLCNIC_STATS_PORT
) {
960 if (port
>= QLCNIC_MAX_PCI_FUNC
)
962 } else if (func_esw
== QLCNIC_STATS_ESWITCH
) {
963 if (port
>= QLCNIC_NIU_MAX_XG_PORTS
)
969 if (rx_tx
> QLCNIC_QUERY_TX_COUNTER
)
972 arg1
= port
| QLCNIC_STATS_VERSION
<< 8 | func_esw
<< 12;
973 arg1
|= BIT_14
| rx_tx
<< 15;
975 return qlcnic_issue_cmd(adapter
,
976 adapter
->ahw
->pci_func
,
977 adapter
->fw_hal_version
,
981 QLCNIC_CDRP_CMD_GET_ESWITCH_STATS
);
984 dev_err(&adapter
->pdev
->dev
, "Invalid argument func_esw=%d port=%d"
985 "rx_ctx=%d\n", func_esw
, port
, rx_tx
);
static int
__qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
					u32 *arg1, u32 *arg2)
{
	int err = -EIO;
	u8 pci_func;

	pci_func = (*arg1 >> 8);
	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			*arg1,
			0,
			0,
			QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG);

	if (err == QLCNIC_RCODE_SUCCESS) {
		*arg1 = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
		*arg2 = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
		dev_info(&adapter->pdev->dev,
			"eSwitch port config for pci func %d\n", pci_func);
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get eswitch port config for pci func %d\n",
			pci_func);
	}
	return err;
}
/* Configure eSwitch port
 * op_mode = 0 for setting default port behavior
 * op_mode = 1 for setting vlan id
 * op_mode = 2 for deleting vlan id
 * op_type = 0 for vlan_id
 * op_type = 1 for port vlan_id
 */
int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	int err = -EIO;
	u32 arg1, arg2 = 0;
	u8 pci_func;

	if (adapter->op_mode != QLCNIC_MGMT_FUNC)
		return err;
	pci_func = esw_cfg->pci_func;
	arg1 = (adapter->npars[pci_func].phy_port & BIT_0);
	arg1 |= (pci_func << 8);

	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return err;
	arg1 &= ~(0x0ff << 8);
	arg1 |= (pci_func << 8);
	arg1 &= ~(BIT_2 | BIT_3);
	switch (esw_cfg->op_mode) {
	case QLCNIC_PORT_DEFAULTS:
		arg1 |= (BIT_4 | BIT_6 | BIT_7);
		arg2 |= (BIT_0 | BIT_1);
		if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			arg2 |= (BIT_2 | BIT_3);
		if (!(esw_cfg->discard_tagged))
			arg1 &= ~BIT_4;
		if (!(esw_cfg->promisc_mode))
			arg1 &= ~BIT_6;
		if (!(esw_cfg->mac_override))
			arg1 &= ~BIT_7;
		if (!(esw_cfg->mac_anti_spoof))
			arg2 &= ~BIT_0;
		if (!(esw_cfg->offload_flags & BIT_0))
			arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
		if (!(esw_cfg->offload_flags & BIT_1))
			arg2 &= ~BIT_1;
		if (!(esw_cfg->offload_flags & BIT_2))
			arg2 &= ~BIT_2;
		break;
	case QLCNIC_ADD_VLAN:
		arg1 |= (BIT_2 | BIT_5);
		arg1 |= (esw_cfg->vlan_id << 16);
		break;
	case QLCNIC_DEL_VLAN:
		arg1 |= (BIT_3 | BIT_5);
		arg1 &= ~(0x0ffff << 16);
		break;
	default:
		return err;
	}

	err = qlcnic_issue_cmd(adapter,
			adapter->ahw->pci_func,
			adapter->fw_hal_version,
			arg1,
			arg2,
			0,
			QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure eswitch pci func %d\n", pci_func);
	} else {
		dev_info(&adapter->pdev->dev,
			"Configured eSwitch for pci func %d\n", pci_func);
	}

	return err;
}
int
qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
			struct qlcnic_esw_func_cfg *esw_cfg)
{
	u32 arg1, arg2;
	u8 phy_port;

	if (adapter->op_mode == QLCNIC_MGMT_FUNC)
		phy_port = adapter->npars[esw_cfg->pci_func].phy_port;
	else
		phy_port = adapter->physical_port;
	arg1 = phy_port;
	arg1 |= (esw_cfg->pci_func << 8);
	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return -EIO;

	esw_cfg->discard_tagged = !!(arg1 & BIT_4);
	esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
	esw_cfg->promisc_mode = !!(arg1 & BIT_6);
	esw_cfg->mac_override = !!(arg1 & BIT_7);
	esw_cfg->vlan_id = LSW(arg1 >> 16);
	esw_cfg->mac_anti_spoof = (arg2 & 0x1);
	esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);

	return 0;
}