/* Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
char *be_misconfig_evt_port_state[] = {
	"Physical Link is functional",
	"Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.",
	"Optics of two types installed – Remove one optic or install matching pair of optics.",
	"Incompatible optics – Replace with compatible optics for card to function.",
	"Unqualified optics – Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics – Replace with Avago-certified optics to enable link operation."
};

static char *be_port_misconfig_evt_severity[] = {
	KERN_WARNING,
	KERN_INFO,
	KERN_ERR,
	KERN_WARNING
};

static char *phy_state_oper_desc[] = {
	"Link is non-operational",
	"Link is operational",
	""
};
static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_HOST_DDR_DMA,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_LOOPBACK_TEST,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_HSW_CONFIG,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_DEVCFG | BE_PRIV_VHADM
	},
};
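
/* Note: opcodes that do not appear in this map are permitted for any
 * function; for a listed opcode/subsystem pair the caller must hold at
 * least one of the privileges in priv_mask (enforced by be_cmd_allowed()
 * below).
 */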
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;
	int i;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
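
/* MCC doorbell write: the queue id sits in the low bits and the count of
 * newly posted WRBs in the num_posted field, so a single register write
 * tells the FW exactly how many new entries to consume.
 */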
static int be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_ANY))
		return -EIO;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);

	return 0;
}
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
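
/* fill_wrb_tags() stashes the request header's address in the WRB's
 * tag0/tag1, which the FW copies opaquely into the completion; the helper
 * below reassembles that address. The double 16-bit shift keeps the
 * expression well-defined even when unsigned long is only 32 bits wide.
 */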
static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	     (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	      addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}
/* Placeholder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;

			adapter->hwmon_info.be_on_die_temp =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
			adapter->hwmon_info.be_on_die_temp =
						BE_INVALID_DIE_TEMP;
		}
		return;
	}
}
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb
	 */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
		    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}
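
/* The FW packs one byte of PHY state per port into event_data_word1 (and
 * matching link-effect info into event_data_word2), hence the
 * (hba_port_num * 8) shifts in the handler below.
 */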
static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1);
	u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2);
	u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE;
	struct device *dev = &adapter->pdev->dev;
	u8 msg_severity = DEFAULT_MSG_SEVERITY;
	u8 phy_state_info;
	u8 new_phy_state;

	new_phy_state =
		(sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff;

	if (new_phy_state == adapter->phy_state)
		return;

	adapter->phy_state = new_phy_state;

	/* for older fw that doesn't populate link effect data */
	if (!sfp_misconfig_evt_word2)
		goto log_message;

	phy_state_info =
		(sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff;

	if (phy_state_info & PHY_STATE_INFO_VALID) {
		msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1;

		if (be_phy_unqualified(new_phy_state))
			phy_oper_state = (phy_state_info & PHY_STATE_OPER);
	}

log_message:
	/* Log an error message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	if (be_phy_state_unknown(new_phy_state))
		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
			   "Port %c: Unrecognized Optics state: 0x%x. %s",
			   adapter->port_name, new_phy_state,
			   phy_state_oper_desc[phy_oper_state]);
	else
		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
			   "Port %c: %s %s",
			   adapter->port_name,
			   be_misconfig_evt_port_state[new_phy_state],
			   phy_state_oper_desc[phy_oper_state]);

	/* Log Vendor name and part no. if a misconfigured SFP is detected */
	if (be_phy_misconfigured(new_phy_state))
		adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED;
}
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio_bits =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

#define MGMT_ENABLE_MASK	0x4
static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);

	if (evt_dw1 & MGMT_ENABLE_MASK) {
		adapter->flags |= BE_FLAGS_OS2BMC;
		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
	} else {
		adapter->flags &= ~BE_FLAGS_OS2BMC;
	}
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	/* Async event to disable/enable os2bmc and/or mac-learning */
	case ASYNC_EVENT_FW_CONTROL:
		be_async_grp5_fw_control_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}
static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_SLIPORT;
}

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		be_set_error(adapter, BE_ERROR_FW);
		return -EIO;
	}
	return status;
}
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u32 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	status = be_mcc_notify(adapter);
	if (status)
		goto out;

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}
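
/* A readback of all-ones from a BAR-mapped register usually means the PCI
 * device has dropped off the bus (e.g. surprise removal), so it is treated
 * as a hard failure below rather than retried.
 */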
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			be_set_error(adapter, BE_ERROR_FW);
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			return 0;

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;

		msleep(1000);
	}

	return sliport_status ? : -1;
}
int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}
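
/* A WRB either embeds the command payload in-line (MCC_WRB_EMBEDDED_MASK)
 * or, for commands larger than the WRB, carries a single scatter-gather
 * entry pointing at a separate DMA buffer (mem != NULL below).
 */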
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}
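
/* Commands go over the MCC queue once it has been created; before that
 * (early init) or after it is destroyed, they fall back to the bootstrap
 * mailbox. The lock/copy/notify helpers below hide that choice from the
 * individual command implementations.
 */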
/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}
/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
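
/* Most commands below follow one pattern: take mcc_lock, grab a free WRB
 * from the MCC queue (-EBUSY if the ring is full), build the request in
 * place, then notify the FW and wait for its completion.
 */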
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
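
/* Example: for power-of-2 ring sizes fls() yields log2(len) + 1, so a
 * 256-entry ring encodes as 9 and a 1024-entry ring as 11; an encoding of
 * 16 is represented as 0, per the check above.
 */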
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State, Sliport Event and Group 5 Events
	 * (bits 1, 5 and 17 set)
	 */
	req->async_event_bitmap[0] =
			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
				    BIT(ASYNC_EVENT_CODE_GRP_5) |
				    BIT(ASYNC_EVENT_CODE_QNQ) |
				    BIT(ASYNC_EVENT_CODE_SLIPORT));

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}
static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			"or newer to avoid conflicting priorities between NIC "
			"and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_eth_tx_create *req;
	struct be_queue_info *txq = &txo->q;
	struct be_queue_info *cq = &txo->cq;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	int status, ver = 0;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
	} else if (BEx_chip(adapter)) {
		if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
			req->hdr.version = 2;
	} else { /* For SH */
		req->hdr.version = 2;
	}

	if (req->hdr.version > 0)
		req->if_id = cpu_to_le16(adapter->if_handle);
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;
	req->cq_id = cpu_to_le16(cq->id);
	req->queue_size = be_encoded_q_len(txq->len);
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	ver = req->hdr.version;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);

		txq->id = le16_to_cpu(resp->cid);
		if (ver == 2)
			txo->db_offset = le32_to_cpu(resp->db_offset);
		else
			txo->db_offset = DB_TXULP1_OFFSET;
		txq->created = true;
	}

	return status;
}
int be_cmd_rxq_create(struct be_adapter *adapter,
		      struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		      u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);

		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		     int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
			       NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Create an rx filtering policy configuration on an i/f
 * Will use MBOX only if MCCQ has not been created.
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_create *req;
	int status;

	req = embedded_payload(&wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_CREATE,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = true;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);

		*if_handle = le32_to_cpu(resp->interface_id);

		/* Hack to retrieve VF's pmac-id on BE3 */
		if (BE3_chip(adapter) && be_virtfn(adapter))
			adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
	}
	return status;
}
/* Uses MCCQ if available else MBOX */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	req = embedded_payload(&wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
			       sizeof(*req), &wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_cmd_notify_wait(adapter, &wrb);
	return status;
}
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
			       nonemb_cmd);

	/* version 1 of the cmd is supported on all chips except BE2 */
	if (BE2_chip(adapter))
		hdr->version = 0;
	else if (BE3_chip(adapter) || lancer_chip(adapter))
		hdr->version = 1;
	else
		hdr->version = 2;

	status = be_mcc_notify(adapter);
	if (status)
		goto err;

	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
			       struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
			       wrb, nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;

	status = be_mcc_notify(adapter);
	if (status)
		goto err;

	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
static int be_mac_to_link_speed(int mac_speed)
{
	switch (mac_speed) {
	case PHY_LINK_SPEED_ZERO:
		return 0;
	case PHY_LINK_SPEED_10MBPS:
		return 10;
	case PHY_LINK_SPEED_100MBPS:
		return 100;
	case PHY_LINK_SPEED_1GBPS:
		return 1000;
	case PHY_LINK_SPEED_10GBPS:
		return 10000;
	case PHY_LINK_SPEED_20GBPS:
		return 20000;
	case PHY_LINK_SPEED_25GBPS:
		return 25000;
	case PHY_LINK_SPEED_40GBPS:
		return 40000;
	}
	return 0;
}
/* Uses synchronous mcc
 * Returns link_speed in Mbps
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
			       sizeof(*req), wrb, NULL);

	/* version 1 of the cmd is supported on all chips except BE2 */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);

		if (link_speed) {
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses async mcc */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
			       sizeof(*req), wrb, NULL);

	status = be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses synchronous mcc */
int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_get_fat *req;
	int status;

	req = embedded_payload(&wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_FAT, sizeof(*req),
			       &wrb, NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_cmd_notify_wait(adapter, &wrb);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb);

		if (dump_size && resp->log_size)
			*dump_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
	return status;
}
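
/* The FAT log is fetched in 60KB chunks: each loop iteration below reposts
 * the same non-embedded DMA buffer with read_log_offset advanced until the
 * caller's buffer is filled.
 */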
int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
	    log_offset = sizeof(u32), payload_len;
	int status = 0;

	if (buf_len == 0)
		return 0;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
					     get_fat_cmd.size,
					     &get_fat_cmd.dma, GFP_ATOMIC);
	if (!get_fat_cmd.va)
		return -ENOMEM;

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				       OPCODE_COMMON_MANAGE_FAT, payload_len,
				       wrb, &get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;

			memcpy(buf + offset,
			       resp->data_buffer,
			       le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
			  get_fat_cmd.va, get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
			       NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);

		strlcpy(adapter->fw_ver, resp->firmware_version_string,
			sizeof(adapter->fw_ver));
		strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
			sizeof(adapter->fw_on_flash));
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
static int __be_cmd_modify_eqd(struct be_adapter *adapter,
			       struct be_set_eqd *set_eqd, int num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0, i;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
			       NULL);

	req->num_eq = cpu_to_le32(num);
	for (i = 0; i < num; i++) {
		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
		req->set_eqd[i].phase = 0;
		req->set_eqd[i].delay_multiplier =
				cpu_to_le32(set_eqd[i].delay_multiplier);
	}

	status = be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
		      int num)
{
	int num_eqs, i = 0;

	while (num) {
		num_eqs = min(num, 8);
		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
		i += num_eqs;
		num -= num_eqs;
	}

	return 0;
}
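
/* A single MODIFY_EQ_DELAY request carries at most 8 EQ entries, so larger
 * updates are issued in batches of 8 by the wrapper above.
 */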
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
			       wrb, NULL);
	req->hdr.domain = domain;

	req->interface_id = if_id;
	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
	req->num_vlan = num;
	memcpy(req->normal_vlan, vtag_array,
	       req->num_vlan * sizeof(vtag_array[0]));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
			       wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->if_flags_mask = cpu_to_le32(flags);
	req->if_flags = (value == ON) ? req->if_flags_mask : 0;

	if (flags & BE_IF_FLAGS_MULTICAST) {
		struct netdev_hw_addr *ha;
		int i = 0;

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct device *dev = &adapter->pdev->dev;

	if ((flags & be_if_cap_flags(adapter)) != flags) {
		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	}
	flags &= be_if_cap_flags(adapter);
	if (!flags)
		return -ENOTSUPP;

	return __be_cmd_rx_filter(adapter, flags, value);
}
/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	req->hdr.version = 1;
	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
		return -EOPNOTSUPP;

	return status;
}
/* Uses synchronous mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);

		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_query_fw_cfg(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			       sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);

		adapter->port_num = le32_to_cpu(resp->phys_port);
		adapter->function_mode = le32_to_cpu(resp->function_mode);
		adapter->function_caps = le32_to_cpu(resp->function_caps);
		adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
		dev_info(&adapter->pdev->dev,
			 "FW config: function_mode=0x%x, function_caps=0x%x\n",
			 adapter->function_mode, adapter->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (lancer_chip(adapter)) {
		iowrite32(SLI_PORT_CONTROL_IP_MASK,
			  adapter->db + SLIPORT_CONTROL_OFFSET);
		status = lancer_wait_ready(adapter);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Adapter in non recoverable error\n");
		return status;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
			       NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
		      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	int status;

	if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(rss_hash_opts);
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);

	if (!BEx_chip(adapter))
		req->hdr.version = 1;

	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			    u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_BEACON,
			       sizeof(*req), wrb, NULL);

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
			       wrb, NULL);

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);

		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
				      u8 page_num, u8 *data)
{
	struct be_dma_mem cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_port_type *req;
	int status;

	if (page_num > TR_PAGE_A2)
		return -EINVAL;

	cmd.size = sizeof(struct be_cmd_resp_port_type);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_TRANSRECV_DATA,
			       cmd.size, wrb, &cmd);

	req->port = cpu_to_le32(adapter->hba_port_num);
	req->page_num = cpu_to_le32(page_num);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_port_type *resp = cmd.va;

		memcpy(data, resp->page_data, PAGE_DATA_LEN);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
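
/* Flash commands complete asynchronously: be_async_cmd_process() records
 * the completion status in adapter->flash_status and signals et_cmd_compl,
 * on which the issuing thread waits with a generous timeout.
 */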
static int lancer_cmd_write_object(struct be_adapter *adapter,
				   struct be_dma_mem *cmd, u32 data_size,
				   u32 data_offset, const char *obj_name,
				   u32 *data_written, u8 *change_status,
				   u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_OBJECT,
			       sizeof(struct lancer_cmd_req_write_object), wrb,
			       NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
		      write_length, ctxt, data_size);

	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			      eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			      eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strlcpy(req->object_name, obj_name, sizeof(req->object_name));
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma +
				     sizeof(struct lancer_cmd_req_write_object))
				    & 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	status = be_mcc_notify(adapter);
	if (status)
		goto err_unlock;

	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(60000)))
		status = -ETIMEDOUT;
	else
		status = adapter->flash_status;

	resp = embedded_payload(wrb);
	if (!status) {
		*data_written = le32_to_cpu(resp->actual_write_len);
		*change_status = resp->change_status;
	} else {
		*addn_status = resp->additional_status;
	}

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2410 int be_cmd_query_cable_type(struct be_adapter
*adapter
)
2412 u8 page_data
[PAGE_DATA_LEN
];
2415 status
= be_cmd_read_port_transceiver_data(adapter
, TR_PAGE_A0
,
2418 switch (adapter
->phy
.interface_type
) {
2420 adapter
->phy
.cable_type
=
2421 page_data
[QSFP_PLUS_CABLE_TYPE_OFFSET
];
2423 case PHY_TYPE_SFP_PLUS_10GB
:
2424 adapter
->phy
.cable_type
=
2425 page_data
[SFP_PLUS_CABLE_TYPE_OFFSET
];
2428 adapter
->phy
.cable_type
= 0;
int be_cmd_query_sfp_info(struct be_adapter *adapter)
{
	u8 page_data[PAGE_DATA_LEN];
	int status;

	status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
						   page_data);
	if (!status) {
		strlcpy(adapter->phy.vendor_name, page_data +
			SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
		strlcpy(adapter->phy.vendor_pn,
			page_data + SFP_VENDOR_PN_OFFSET,
			SFP_VENDOR_NAME_LEN - 1);
	}

	return status;
}
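
/* Note: SFP_VENDOR_NAME_LEN - 1 is passed as the strlcpy() size above so
 * that both vendor strings are always NUL-terminated within the
 * adapter->phy buffers even when the raw page data is not.
 */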
static int lancer_cmd_delete_object(struct be_adapter *adapter,
				    const char *obj_name)
{
	struct lancer_cmd_req_delete_object *req;
	struct be_mcc_wrb *wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_DELETE_OBJECT,
			       sizeof(*req), wrb, NULL);

	strlcpy(req->object_name, obj_name, sizeof(req->object_name));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			   u32 data_size, u32 data_offset, const char *obj_name,
			   u32 *data_read, u32 *eof, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_read_object *req;
	struct lancer_cmd_resp_read_object *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_OBJECT,
			       sizeof(struct lancer_cmd_req_read_object), wrb,
			       NULL);

	req->desired_read_len = cpu_to_le32(data_size);
	req->read_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

	status = be_mcc_notify_wait(adapter);

	resp = embedded_payload(wrb);
	if (!status) {
		*data_read = le32_to_cpu(resp->actual_read_len);
		*eof = le32_to_cpu(resp->eof);
	} else {
		*addn_status = resp->additional_status;
	}

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
static int be_cmd_write_flashrom(struct be_adapter *adapter,
				 struct be_dma_mem *cmd, u32 flash_type,
				 u32 flash_opcode, u32 img_offset, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
			       cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	if (flash_type == OPTYPE_OFFSET_SPECIFIED)
		req->params.offset = cpu_to_le32(img_offset);

	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	status = be_mcc_notify(adapter);
	if (status)
		goto err_unlock;

	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(40000)))
		status = -ETIMEDOUT;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
				u16 img_optype, u32 img_offset, u32 crc_offset)
{
	struct be_cmd_read_flash_crc *req;
	struct be_mcc_wrb *wrb;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
			       wrb, NULL);

	req->params.op_type = cpu_to_le32(img_optype);
	if (img_optype == OPTYPE_OFFSET_SPECIFIED)
		req->params.offset = cpu_to_le32(img_offset + crc_offset);
	else
		req->params.offset = cpu_to_le32(crc_offset);

	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->crc, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
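
/* Usage sketch (illustrative): be_check_flash_crc() below uses this helper
 * to skip re-flashing unchanged sections. Comparing one region by hand
 * would look like:
 *
 *	u8 crc[4];
 *
 *	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
 *				      img_size - 4);
 *	if (!status && !memcmp(crc, p + crc_offset, 4))
 *		;	// region on flash already matches the image
 */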
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};

static bool phy_flashing_required(struct be_adapter *adapter)
{
	return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
		adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
}
static bool is_comp_in_ufi(struct be_adapter *adapter,
			   struct flash_section_info *fsec, int type)
{
	int i = 0, img_type = 0;
	struct flash_section_info_g2 *fsec_g2 = NULL;

	if (BE2_chip(adapter))
		fsec_g2 = (struct flash_section_info_g2 *)fsec;

	for (i = 0; i < MAX_FLASH_COMP; i++) {
		if (fsec_g2)
			img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
		else
			img_type = le32_to_cpu(fsec->fsec_entry[i].type);

		if (img_type == type)
			return true;
	}
	return false;
}
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
						int header_size,
						const struct firmware *fw)
{
	struct flash_section_info *fsec = NULL;
	const u8 *p = fw->data;

	p += header_size;
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
			return fsec;
		p += 32;
	}
	return NULL;
}
static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
			      u32 img_offset, u32 img_size, int hdr_size,
			      u16 img_optype, bool *crc_match)
{
	u32 crc_offset;
	int status;
	u8 crc[4];

	status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
				      img_size - 4);
	if (status)
		return status;

	crc_offset = hdr_size + img_offset + img_size - 4;

	/* Skip flashing, if crc of flashed region matches */
	if (!memcmp(crc, p + crc_offset, 4))
		*crc_match = true;
	else
		*crc_match = false;

	return status;
}
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		num_bytes = min_t(u32, 32 * 1024, total_bytes);

		total_bytes -= num_bytes;

		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
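
/* Chunking note (sketch): the image is pushed in 32KB pieces and only the
 * final piece carries a FLASH op-code; every earlier piece uses SAVE. For
 * a 100KB image the (op, bytes) sequence is therefore:
 *
 *	SAVE 32768, SAVE 32768, SAVE 32768, FLASH 4096
 */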
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	struct flash_comp gen3_flash_types[] = {
		{ BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
		{ BE3_REDBOOT_START, OPTYPE_REDBOOT,
			BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
		{ BE3_ISCSI_BIOS_START, OPTYPE_BIOS,
			BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
		{ BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS,
			BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
		{ BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
			BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
		{ BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
		{ BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
		{ BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
			BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE},
		{ BE3_NCSI_START, OPTYPE_NCSI_FW,
			BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI},
		{ BE3_PHY_FW_START, OPTYPE_PHY_FW,
			BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY}
	};

	struct flash_comp gen2_flash_types[] = {
		{ BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
			BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
		{ BE2_REDBOOT_START, OPTYPE_REDBOOT,
			BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
		{ BE2_ISCSI_BIOS_START, OPTYPE_BIOS,
			BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
		{ BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS,
			BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
		{ BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
			BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
		{ BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
			BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
		{ BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
			BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
		{ BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
			BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		img_hdrs_size = 0;
	}

	/* Get flash section info*/
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
{
	u32 img_type = le32_to_cpu(fsec_entry.type);
	u16 img_optype = le16_to_cpu(fsec_entry.optype);

	if (img_optype != 0xFFFF)
		return img_optype;

	switch (img_type) {
	case IMAGE_FIRMWARE_ISCSI:
		img_optype = OPTYPE_ISCSI_ACTIVE;
		break;
	case IMAGE_BOOT_CODE:
		img_optype = OPTYPE_REDBOOT;
		break;
	case IMAGE_OPTION_ROM_ISCSI:
		img_optype = OPTYPE_BIOS;
		break;
	case IMAGE_OPTION_ROM_PXE:
		img_optype = OPTYPE_PXE_BIOS;
		break;
	case IMAGE_OPTION_ROM_FCOE:
		img_optype = OPTYPE_FCOE_BIOS;
		break;
	case IMAGE_FIRMWARE_BACKUP_ISCSI:
		img_optype = OPTYPE_ISCSI_BACKUP;
		break;
	case IMAGE_NCSI:
		img_optype = OPTYPE_NCSI_FW;
		break;
	case IMAGE_FLASHISM_JUMPVECTOR:
		img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
		break;
	case IMAGE_FIRMWARE_PHY:
		img_optype = OPTYPE_SH_PHY_FW;
		break;
	case IMAGE_REDBOOT_DIR:
		img_optype = OPTYPE_REDBOOT_DIR;
		break;
	case IMAGE_REDBOOT_CONFIG:
		img_optype = OPTYPE_REDBOOT_CONFIG;
		break;
	case IMAGE_UFI_DIR:
		img_optype = OPTYPE_UFI_DIR;
		break;
	default:
		break;
	}

	return img_optype;
}
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype,
				  img_size, img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);

			switch (addl_status(status)) {
			case MCC_ADDL_STATUS_MISSING_SIGNATURE:
				dev_err(dev,
					"Digital signature missing in FW\n");
				return -EINVAL;
			case MCC_ADDL_STATUS_INVALID_SIGNATURE:
				dev_err(dev,
					"Invalid digital signature in FW\n");
				return -EINVAL;
			default:
				return -EFAULT;
			}
		}
	}
	return 0;
}
int lancer_fw_download(struct be_adapter *adapter,
		       const struct firmware *fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
					   &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
/* Check if the flash image file is compatible with the adapter that
 * is being flashed.
 */
static bool be_check_ufi_compatibility(struct be_adapter *adapter,
				       struct flash_file_hdr_g3 *fhdr)
{
	if (!fhdr) {
		dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
		return false;
	}

	/* First letter of the build version is used to identify
	 * which chip this image file is meant for.
	 */
	switch (fhdr->build[0]) {
	case BLD_STR_UFI_TYPE_SH:
		if (!skyhawk_chip(adapter))
			return false;
		break;
	case BLD_STR_UFI_TYPE_BE3:
		if (!BE3_chip(adapter))
			return false;
		break;
	case BLD_STR_UFI_TYPE_BE2:
		if (!BE2_chip(adapter))
			return false;
		break;
	default:
		return false;
	}

	/* In BE3 FW images the "asic_type_rev" field doesn't track the
	 * asic_rev of the chips it is compatible with.
	 * When asic_type_rev is 0 the image is compatible only with
	 * pre-BE3-R chips (asic_rev < 0x10)
	 */
	if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
		return adapter->asic_rev < 0x10;
	else
		return (fhdr->asic_type_rev >= adapter->asic_rev);
}
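
/* Example (illustrative): on a Skyhawk adapter with asic_rev 0x10, a UFI
 * whose build string starts with BLD_STR_UFI_TYPE_SH and whose
 * asic_type_rev is >= 0x10 passes this check; a BE3-typed image is
 * rejected in the switch above regardless of its revision fields.
 */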
int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					   GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
			    struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
			       wrb, nonemb_cmd);
	memcpy(req->magic_mac, mac, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			    CMD_SUBSYSTEM_LOWLEVEL))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
			       wrb, NULL);

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify(adapter);
	if (status)
		goto err_unlock;

	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
					 msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
		status = -ETIMEDOUT;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
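
/* Usage sketch (illustrative): the self-test code is expected to pair this
 * with be_cmd_loopback_test() below; the loopback-type constants here are
 * the ones defined in be_ethtool.c:
 *
 *	be_cmd_set_loopback(adapter, port, BE_PHY_LOOPBACK, 1);
 *	status = be_cmd_loopback_test(adapter, port, BE_PHY_LOOPBACK,
 *				      1500, 2, 0xabc);
 *	be_cmd_set_loopback(adapter, port, BE_NO_LOOPBACK, 1);
 */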
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
			 u32 loopback_type, u32 pkt_size, u32 num_pkts,
			 u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	struct be_cmd_resp_loopback_test *resp;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
			    CMD_SUBSYSTEM_LOWLEVEL))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
			       NULL);

	req->hdr.timeout = cpu_to_le32(15);
	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify(adapter);
	if (status)
		goto err;

	spin_unlock_bh(&adapter->mcc_lock);

	wait_for_completion(&adapter->et_cmd_compl);
	resp = embedded_payload(wrb);
	status = le32_to_cpu(resp->status);

	return status;
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i, j = 0;

	if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
			    CMD_SUBSYSTEM_LOWLEVEL))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			       OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
			       cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j * 8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;

		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
		    resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
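
/* Pattern note (sketch): the fill loop above replicates the 64-bit pattern
 * byte-by-byte, least-significant byte first. For pattern
 * 0x1122334455667788ULL the buffer starts 0x88 0x77 0x66 0x55 0x44 0x33
 * 0x22 0x11 and then repeats until byte_cnt bytes are written.
 */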
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
			    struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
			       nonemb_cmd);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_get_phy_info(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_dma_mem cmd;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
			       wrb, &cmd);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_phy_info *resp_phy_info =
				cmd.va + sizeof(struct be_cmd_req_hdr);

		adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
		adapter->phy.interface_type =
			le16_to_cpu(resp_phy_info->interface_type);
		adapter->phy.auto_speeds_supported =
			le16_to_cpu(resp_phy_info->auto_speeds_supported);
		adapter->phy.fixed_speeds_supported =
			le16_to_cpu(resp_phy_info->fixed_speeds_supported);
		adapter->phy.misc_params =
			le32_to_cpu(resp_phy_info->misc_params);

		if (BE2_chip(adapter)) {
			adapter->phy.fixed_speeds_supported =
				BE_SUPPORTED_SPEED_10GBPS |
				BE_SUPPORTED_SPEED_1GBPS;
		}
	}
	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	int status, i;
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;
	u32 *serial_num;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
					     attribs_cmd.size,
					     &attribs_cmd.dma, GFP_ATOMIC);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = attribs_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
			       wrb, &attribs_cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
		serial_num = attribs->hba_attribs.controller_serial_number;
		for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
			adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
				(BIT_MASK(16) - 1);
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	if (attribs_cmd.va)
		dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
				  attribs_cmd.va, attribs_cmd.dma);
	return status;
}
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
			       sizeof(*req), wrb, NULL);

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);

		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
		if (!adapter->be3_native)
			dev_warn(&adapter->pdev->dev,
				 "adapter not in advanced mode\n");
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* Get privilege(s) for a function */
int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
			     u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fn_privileges *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fn_privileges *resp =
						embedded_payload(wrb);

		*privilege = le32_to_cpu(resp->privilege_mask);

		/* In UMC mode FW does not return right privileges.
		 * Override with correct privilege equivalent to PF.
		 */
		if (BEx_chip(adapter) && be_is_mc(adapter) &&
		    be_physfn(adapter))
			*privilege = MAX_PRIVILEGES;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Set privilege(s) for a function */
int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
			     u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_fn_privileges *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
			       wrb, NULL);
	req->hdr.domain = domain;
	if (lancer_chip(adapter))
		req->privileges_lancer = cpu_to_le32(privileges);
	else
		req->privileges = cpu_to_le32(privileges);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
 * pmac_id_valid: false => pmac_id or MAC address is requested.
 * If pmac_id is returned, pmac_id_valid is returned as true
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
			     bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
			     u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
						  get_mac_list_cmd.size,
						  &get_mac_list_cmd.dma,
						  GFP_ATOMIC);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_MAC_LIST,
			       get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	if (*pmac_id_valid) {
		req->mac_id = cpu_to_le32(*pmac_id);
		req->iface_id = cpu_to_le16(if_handle);
		req->perm_override = 0;
	} else {
		req->perm_override = 1;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;

		if (*pmac_id_valid) {
			memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
			       ETH_ALEN);
			goto out;
		}

		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* Mac list returned could contain one or more active mac_ids
		 * or one or more true or pseudo permanent mac addresses.
		 * If an active mac_id is present, return first active mac_id
		 * found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_valid = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id found, return first mac addr */
		*pmac_id_valid = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
		       ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
			  get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}
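
/* Usage sketch (illustrative): with *pmac_id_valid true on entry the call
 * resolves a known pmac_id to its MAC address:
 *
 *	bool valid = true;
 *	u8 mac[ETH_ALEN];
 *
 *	status = be_cmd_get_mac_from_list(adapter, mac, &valid, &pmac_id,
 *					  if_handle, 0);
 *
 * With *pmac_id_valid false the same call scans the list instead and
 * returns either the first active mac_id or the first MAC address.
 */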
int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
			  u8 *mac, u32 if_handle, bool active, u32 domain)
{
	if (!active)
		be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
					 if_handle, domain);
	if (BEx_chip(adapter))
		return be_cmd_mac_addr_query(adapter, mac, false,
					     if_handle, curr_pmac_id);
	else
		/* Fetch the MAC address using pmac_id */
		return be_cmd_get_mac_from_list(adapter, mac, &active,
						&curr_pmac_id,
						if_handle, domain);
}
*adapter
, u8
*mac
)
3769 bool pmac_valid
= false;
3773 if (BEx_chip(adapter
)) {
3774 if (be_physfn(adapter
))
3775 status
= be_cmd_mac_addr_query(adapter
, mac
, true, 0,
3778 status
= be_cmd_mac_addr_query(adapter
, mac
, false,
3779 adapter
->if_handle
, 0);
3781 status
= be_cmd_get_mac_from_list(adapter
, mac
, &pmac_valid
,
3782 NULL
, adapter
->if_handle
, 0);
/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
			u8 mac_count, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_mac_list *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
			       wrb, &cmd);

	req->hdr.domain = domain;
	req->mac_count = mac_count;
	if (mac_count)
		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);

	status = be_mcc_notify_wait(adapter);

err:
	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Wrapper to delete any active MACs and provision the new mac.
 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
 * current list are active.
 */
int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
{
	bool active_mac = false;
	u8 old_mac[ETH_ALEN];
	u32 pmac_id;
	int status;

	status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
					  &pmac_id, if_id, dom);

	if (!status && active_mac)
		be_cmd_pmac_del(adapter, if_id, pmac_id, dom);

	return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
}
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
			  u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_hsw_config *req;
	void *ctxt;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
	if (pvid) {
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
	}
	if (hsw_mode) {
		AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
			      ctxt, adapter->hba_port_num);
		AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
			      ctxt, hsw_mode);
	}

	/* Enable/disable both mac and vlan spoof checking */
	if (!BEx_chip(adapter) && spoofchk) {
		AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
			      ctxt, spoofchk);
		AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
			      ctxt, spoofchk);
	}

	be_dws_cpu_to_le(req->context, sizeof(req->context));
	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
			  u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_hsw_config *req;
	void *ctxt;
	int status;
	u16 vid;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
		      ctxt, intf_id);
	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);

	if (!BEx_chip(adapter) && mode) {
		AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
			      ctxt, adapter->hba_port_num);
		AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
	}
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_hsw_config *resp =
						embedded_payload(wrb);

		be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
				    pvid, &resp->context);
		if (pvid)
			*pvid = le16_to_cpu(vid);
		if (mode)
			*mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
					      port_fwd_type, &resp->context);
		if (spoofchk)
			*spoofchk =
				AMAP_GET_BITS(struct amap_get_hsw_resp_context,
					      spoofchk, &resp->context);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
static bool be_is_wol_excluded(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;

	if (be_virtfn(adapter))
		return true;

	switch (pdev->subsystem_device) {
	case OC_SUBSYS_DEVICE_ID1:
	case OC_SUBSYS_DEVICE_ID2:
	case OC_SUBSYS_DEVICE_ID3:
	case OC_SUBSYS_DEVICE_ID4:
		return true;
	default:
		return false;
	}
}
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
	int status = 0;
	struct be_dma_mem cmd;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	if (be_is_wol_excluded(adapter))
		return status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			       sizeof(*req), wrb, &cmd);

	req->hdr.version = 1;
	req->query_options = BE_GET_WOL_CAP;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;

		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;

		adapter->wol_cap = resp->wol_settings;

		/* Non-zero macaddr indicates WOL is enabled */
		if (adapter->wol_cap & BE_WOL_CAP &&
		    !is_zero_ether_addr(resp->magic_mac))
			adapter->wol_en = true;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	if (cmd.va)
		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
				  cmd.dma);
	return status;
}
*adapter
, u32 level
)
4052 struct be_dma_mem extfat_cmd
;
4053 struct be_fat_conf_params
*cfgs
;
4057 memset(&extfat_cmd
, 0, sizeof(struct be_dma_mem
));
4058 extfat_cmd
.size
= sizeof(struct be_cmd_resp_get_ext_fat_caps
);
4059 extfat_cmd
.va
= dma_zalloc_coherent(&adapter
->pdev
->dev
,
4060 extfat_cmd
.size
, &extfat_cmd
.dma
,
4065 status
= be_cmd_get_ext_fat_capabilites(adapter
, &extfat_cmd
);
4069 cfgs
= (struct be_fat_conf_params
*)
4070 (extfat_cmd
.va
+ sizeof(struct be_cmd_resp_hdr
));
4071 for (i
= 0; i
< le32_to_cpu(cfgs
->num_modules
); i
++) {
4072 u32 num_modes
= le32_to_cpu(cfgs
->module
[i
].num_modes
);
4074 for (j
= 0; j
< num_modes
; j
++) {
4075 if (cfgs
->module
[i
].trace_lvl
[j
].mode
== MODE_UART
)
4076 cfgs
->module
[i
].trace_lvl
[j
].dbg_lvl
=
4081 status
= be_cmd_set_ext_fat_capabilites(adapter
, &extfat_cmd
, cfgs
);
4083 dma_free_coherent(&adapter
->pdev
->dev
, extfat_cmd
.size
, extfat_cmd
.va
,
int be_cmd_get_fw_log_level(struct be_adapter *adapter)
{
	struct be_dma_mem extfat_cmd;
	struct be_fat_conf_params *cfgs;
	int status, j;
	int level = 0;

	memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
	extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
	extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
					    extfat_cmd.size, &extfat_cmd.dma,
					    GFP_ATOMIC);

	if (!extfat_cmd.va) {
		dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
			__func__);
		goto err;
	}

	status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
	if (!status) {
		cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
						sizeof(struct be_cmd_resp_hdr));

		for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
			if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
				level = cfgs->module[0].trace_lvl[j].dbg_lvl;
		}
	}
	dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
			  extfat_cmd.dma);
err:
	return level;
}
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_ext_fat_caps *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);
	req->parameter_type = cpu_to_le32(1);

	status = be_mbox_notify_wait(adapter);
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
				   struct be_dma_mem *cmd,
				   struct be_fat_conf_params *configs)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ext_fat_caps *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
			       cmd->size, wrb, cmd);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_query_port_name(struct be_adapter *adapter)
{
	struct be_cmd_req_get_port_name *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
			       NULL);
	if (!BEx_chip(adapter))
		req->hdr.version = 1;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);

		adapter->port_name = resp->port_name[adapter->hba_port_num];
	} else {
		adapter->port_name = adapter->hba_port_num + '0';
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* When more than 1 NIC descriptor is present in the descriptor list,
 * the caller must specify the pf_num to obtain the NIC descriptor
 * corresponding to its pci function.
 * get_vft must be true when the caller wants the VF-template desc of the
 * PF-pool.
 * The pf_num should be set to PF_NUM_IGNORE when the caller knows
 * that only it's NIC descriptor is present in the descriptor list.
 */
static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
					       bool get_vft, u8 pf_num)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	struct be_nic_res_desc *nic;
	int i;

	for (i = 0; i < desc_count; i++) {
		if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
		    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
			nic = (struct be_nic_res_desc *)hdr;

			if ((pf_num == PF_NUM_IGNORE ||
			     nic->pf_num == pf_num) &&
			    (!get_vft || nic->flags & BIT(VFT_SHIFT)))
				return nic;
		}
		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return NULL;
}
static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count,
					       u8 pf_num)
{
	return be_get_nic_desc(buf, desc_count, true, pf_num);
}

static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count,
						    u8 pf_num)
{
	return be_get_nic_desc(buf, desc_count, false, pf_num);
}
static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count,
						 u8 pf_num)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	struct be_pcie_res_desc *pcie;
	int i;

	for (i = 0; i < desc_count; i++) {
		if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
		    hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
			pcie = (struct be_pcie_res_desc *)hdr;
			if (pcie->pf_num == pf_num)
				return pcie;
		}

		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return NULL;
}
static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	int i;

	for (i = 0; i < desc_count; i++) {
		if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
			return (struct be_port_res_desc *)hdr;

		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return NULL;
}
static void be_copy_nic_desc(struct be_resources *res,
			     struct be_nic_res_desc *desc)
{
	res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
	res->max_vlans = le16_to_cpu(desc->vlan_count);
	res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
	res->max_tx_qs = le16_to_cpu(desc->txq_count);
	res->max_rss_qs = le16_to_cpu(desc->rssq_count);
	res->max_rx_qs = le16_to_cpu(desc->rq_count);
	res->max_evt_qs = le16_to_cpu(desc->eq_count);
	res->max_cq_count = le16_to_cpu(desc->cq_count);
	res->max_iface_count = le16_to_cpu(desc->iface_count);
	res->max_mcc_count = le16_to_cpu(desc->mcc_count);
	/* Clear flags that driver is not interested in */
	res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
				BE_IF_CAP_FLAGS_WANT;
}
/* Uses Mbox */
int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_func_config *req;
	int status;
	struct be_dma_mem cmd;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_func_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		status = -ENOMEM;
		goto err;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FUNC_CONFIG,
			       cmd.size, wrb, &cmd);

	if (skyhawk_chip(adapter))
		req->hdr.version = 1;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_func_config *resp = cmd.va;
		u32 desc_count = le32_to_cpu(resp->desc_count);
		struct be_nic_res_desc *desc;

		/* GET_FUNC_CONFIG returns resource descriptors of the
		 * current function only. So, pf_num should be set to
		 * PF_NUM_IGNORE.
		 */
		desc = be_get_func_nic_desc(resp->func_param, desc_count,
					    PF_NUM_IGNORE);
		if (!desc) {
			status = -EINVAL;
			goto err;
		}

		/* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */
		adapter->pf_num = desc->pf_num;
		adapter->vf_num = desc->vf_num;

		if (res)
			be_copy_nic_desc(res, desc);
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	if (cmd.va)
		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
				  cmd.dma);
	return status;
}
/* This routine returns a list of all the NIC PF_nums in the adapter */
u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
{
	struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
	struct be_pcie_res_desc *pcie = NULL;
	int i;
	u16 nic_pf_count = 0;

	for (i = 0; i < desc_count; i++) {
		if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
		    hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
			pcie = (struct be_pcie_res_desc *)hdr;
			if (pcie->pf_state && (pcie->pf_type == MISSION_NIC ||
					       pcie->pf_type == MISSION_RDMA)) {
				nic_pf_nums[nic_pf_count++] = pcie->pf_num;
			}
		}

		hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
		hdr = (void *)hdr + hdr->desc_len;
	}
	return nic_pf_count;
}
/* Will use MBOX only if MCCQ has not been created */
int be_cmd_get_profile_config(struct be_adapter *adapter,
			      struct be_resources *res,
			      struct be_port_resources *port_res,
			      u8 profile_type, u8 query, u8 domain)
{
	struct be_cmd_resp_get_profile_config *resp;
	struct be_cmd_req_get_profile_config *req;
	struct be_nic_res_desc *vf_res;
	struct be_pcie_res_desc *pcie;
	struct be_port_res_desc *port;
	struct be_nic_res_desc *nic;
	struct be_mcc_wrb wrb = {0};
	struct be_dma_mem cmd;
	u16 desc_count;
	int status;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va)
		return -ENOMEM;

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_PROFILE_CONFIG,
			       cmd.size, &wrb, &cmd);

	if (!lancer_chip(adapter))
		req->hdr.version = 1;
	req->type = profile_type;
	req->hdr.domain = domain;

	/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
	 * descriptors with all bits set to "1" for the fields which can be
	 * modified using SET_PROFILE_CONFIG cmd.
	 */
	if (query == RESOURCE_MODIFIABLE)
		req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;

	status = be_cmd_notify_wait(adapter, &wrb);
	if (status)
		goto err;

	resp = cmd.va;
	desc_count = le16_to_cpu(resp->desc_count);

	if (port_res) {
		u16 nic_pf_cnt = 0, i;
		u16 nic_pf_num_list[MAX_NIC_FUNCS];

		nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param,
						    desc_count,
						    nic_pf_num_list);

		for (i = 0; i < nic_pf_cnt; i++) {
			nic = be_get_func_nic_desc(resp->func_param, desc_count,
						   nic_pf_num_list[i]);
			if (nic->link_param == adapter->port_num) {
				port_res->nic_pfs++;
				pcie = be_get_pcie_desc(resp->func_param,
							desc_count,
							nic_pf_num_list[i]);
				port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
			}
		}
		goto err;
	}

	pcie = be_get_pcie_desc(resp->func_param, desc_count,
				adapter->pf_num);
	if (pcie)
		res->max_vfs = le16_to_cpu(pcie->num_vfs);

	port = be_get_port_desc(resp->func_param, desc_count);
	if (port)
		adapter->mc_type = port->mc_type;

	nic = be_get_func_nic_desc(resp->func_param, desc_count,
				   adapter->pf_num);
	if (nic)
		be_copy_nic_desc(res, nic);

	vf_res = be_get_vft_desc(resp->func_param, desc_count,
				 adapter->pf_num);
	if (vf_res)
		res->vf_if_cap_flags = vf_res->cap_flags;
err:
	if (cmd.va)
		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
				  cmd.dma);
	return status;
}
/* Will use MBOX only if MCCQ has not been created */
static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
				     int size, int count, u8 version, u8 domain)
{
	struct be_cmd_req_set_profile_config *req;
	struct be_mcc_wrb wrb = {0};
	struct be_dma_mem cmd;
	int status;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_profile_config);
	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				     GFP_ATOMIC);
	if (!cmd.va)
		return -ENOMEM;

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
			       &wrb, &cmd);
	req->hdr.version = version;
	req->hdr.domain = domain;
	req->desc_count = cpu_to_le32(count);
	memcpy(req->desc, desc, size);

	status = be_cmd_notify_wait(adapter, &wrb);

	if (cmd.va)
		dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
				  cmd.dma);
	return status;
}
/* Mark all fields invalid */
void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
	memset(nic, 0, sizeof(*nic));
	nic->unicast_mac_count = 0xFFFF;
	nic->mcc_count = 0xFFFF;
	nic->vlan_count = 0xFFFF;
	nic->mcast_mac_count = 0xFFFF;
	nic->txq_count = 0xFFFF;
	nic->rq_count = 0xFFFF;
	nic->rssq_count = 0xFFFF;
	nic->lro_count = 0xFFFF;
	nic->cq_count = 0xFFFF;
	nic->toe_conn_count = 0xFFFF;
	nic->eq_count = 0xFFFF;
	nic->iface_count = 0xFFFF;
	nic->link_param = 0xFF;
	nic->channel_id_param = cpu_to_le16(0xF000);
	nic->acpi_params = 0xFF;
	nic->wol_param = 0x0F;
	nic->tunnel_iface_count = 0xFFFF;
	nic->direct_tenant_iface_count = 0xFFFF;
	nic->bw_min = 0xFFFFFFFF;
	nic->bw_max = 0xFFFFFFFF;
}
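
/* Convention note: SET_PROFILE_CONFIG treats all-ones fields as "do not
 * modify", so callers start from this reset state and overwrite only the
 * fields they intend to change, as be_cmd_config_qos() does below.
 */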
/* Mark all fields invalid */
static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
{
	memset(pcie, 0, sizeof(*pcie));
	pcie->sriov_state = 0xFF;
	pcie->pf_state = 0xFF;
	pcie->pf_type = 0xFF;
	pcie->num_vfs = 0xFFFF;
}
int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
		      u8 domain)
{
	struct be_nic_res_desc nic_desc;
	u32 bw_percent;
	u16 version = 0;

	if (BE3_chip(adapter))
		return be_cmd_set_qos(adapter, max_rate / 10, domain);

	be_reset_nic_desc(&nic_desc);
	nic_desc.pf_num = adapter->pf_num;
	nic_desc.vf_num = domain;
	nic_desc.bw_min = 0;
	if (lancer_chip(adapter)) {
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
		nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
					(1 << NOSV_SHIFT);
		nic_desc.bw_max = cpu_to_le32(max_rate / 10);
	} else {
		version = 1;
		nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
		nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
		nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
		bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
		nic_desc.bw_max = cpu_to_le32(bw_percent);
	}

	return be_cmd_set_profile_config(adapter, &nic_desc,
					 nic_desc.hdr.desc_len,
					 1, version, domain);
}
int be_cmd_set_sriov_config(struct be_adapter *adapter,
			    struct be_resources pool_res, u16 num_vfs,
			    struct be_resources *vft_res)
{
	struct {
		struct be_pcie_res_desc pcie;
		struct be_nic_res_desc nic_vft;
	} __packed desc;

	/* PF PCIE descriptor */
	be_reset_pcie_desc(&desc.pcie);
	desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
	desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
	desc.pcie.pf_num = adapter->pdev->devfn;
	desc.pcie.sriov_state = num_vfs ? 1 : 0;
	desc.pcie.num_vfs = cpu_to_le16(num_vfs);

	/* VF NIC Template descriptor */
	be_reset_nic_desc(&desc.nic_vft);
	desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
	desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) |
			     BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
	desc.nic_vft.pf_num = adapter->pdev->devfn;
	desc.nic_vft.vf_num = 0;
	desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags);
	desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs);
	desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs);
	desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs);
	desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count);

	if (vft_res->max_uc_mac)
		desc.nic_vft.unicast_mac_count =
					cpu_to_le16(vft_res->max_uc_mac);
	if (vft_res->max_vlans)
		desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans);
	if (vft_res->max_iface_count)
		desc.nic_vft.iface_count =
				cpu_to_le16(vft_res->max_iface_count);
	if (vft_res->max_mcc_count)
		desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count);

	return be_cmd_set_profile_config(adapter, &desc,
					 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
}
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_manage_iface_filters *req;
	int status;

	if (iface == 0xFFFFFFFF)
		return -1;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req),
			       wrb, NULL);
	req->op = op;
	req->target_iface_id = cpu_to_le32(iface);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
{
	struct be_port_res_desc port_desc;

	memset(&port_desc, 0, sizeof(port_desc));
	port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
	port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
	port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
	port_desc.link_num = adapter->hba_port_num;
	if (port) {
		port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
					(1 << RCVID_SHIFT);
		port_desc.nv_port = swab16(port);
	} else {
		port_desc.nv_flags = NV_TYPE_DISABLED;
		port_desc.nv_port = 0;
	}

	return be_cmd_set_profile_config(adapter, &port_desc,
					 RESOURCE_DESC_SIZE_V1, 1, 1, 0);
}
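
/* Usage sketch (illustrative): enabling and later disabling VxLAN offload
 * on the IANA-assigned UDP port 4789:
 *
 *	be_cmd_set_vxlan_port(adapter, cpu_to_be16(4789));
 *	...
 *	be_cmd_set_vxlan_port(adapter, 0);
 */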
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
		     int vf_num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_iface_list *req;
	struct be_cmd_resp_get_iface_list *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
			       wrb, NULL);
	req->hdr.domain = vf_num + 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		resp = (struct be_cmd_resp_get_iface_list *)req;
		vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
	u32 reg_val;
	int status = 0, i;

	for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
		reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
		if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
			break;

		ssleep(1);
	}

	if (i == SLIPORT_IDLE_TIMEOUT)
		status = -1;

	return status;
}
int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
{
	int status;

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);

	return status;
}
/* Routine to check whether dump image is present or not */
bool dump_present(struct be_adapter *adapter)
{
	u32 sliport_status = 0;

	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
	return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
}
int lancer_initiate_dump(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (dump_present(adapter)) {
		dev_info(dev, "Previous dump not cleared, not forcing dump\n");
		return -EEXIST;
	}

	/* give firmware reset and diagnostic dump */
	status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
				     PHYSDEV_CONTROL_DD_MASK);
	if (status < 0) {
		dev_err(dev, "FW reset failed\n");
		return status;
	}

	status = lancer_wait_idle(adapter);
	if (status)
		return status;

	if (!dump_present(adapter)) {
		dev_err(dev, "FW dump not generated\n");
		return -EIO;
	}

	return 0;
}
int lancer_delete_dump(struct be_adapter *adapter)
{
	int status;

	status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
	return be_cmd_status(status);
}
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_enable_disable_vf *req;
	int status;

	if (BEx_chip(adapter))
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = domain;
	req->enable = 1;
	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
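
/*
 * Usage sketch (illustrative only): after provisioning a VF's
 * resources, the PF enables it in that VF's privilege domain, i.e.
 * vf + 1, since domain 0 is the PF itself.
 *
 *	status = be_cmd_enable_vf(adapter, vf + 1);
 */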
int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_intr_set *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
			       wrb, NULL);

	req->intr_enabled = intr_enable;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
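
/*
 * Usage sketch (illustrative only): interrupts are typically enabled
 * once during adapter setup and disabled again on teardown.
 *
 *	status = be_cmd_intr_set(adapter, true);
 *	...
 *	status = be_cmd_intr_set(adapter, false);
 */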
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
{
	struct be_cmd_req_get_active_profile *req;
	struct be_mcc_wrb *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
			       wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_active_profile *resp =
							embedded_payload(wrb);

		*profile_id = le16_to_cpu(resp->active_profile_id);
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
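
/*
 * Usage sketch (illustrative only):
 *
 *	u16 profile_id;
 *
 *	status = be_cmd_get_active_profile(adapter, &profile_id);
 *	if (!status)
 *		dev_info(&adapter->pdev->dev,
 *			 "active profile %u\n", profile_id);
 */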
int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
				     int link_state, int version, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_ll_link *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
			       sizeof(*req), wrb, NULL);

	req->hdr.version = version;
	req->hdr.domain = domain;

	if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
	    link_state == IFLA_VF_LINK_STATE_AUTO)
		req->link_config |= PLINK_ENABLE;

	if (link_state == IFLA_VF_LINK_STATE_AUTO)
		req->link_config |= PLINK_TRACK;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_set_logical_link_config(struct be_adapter *adapter,
				   int link_state, u8 domain)
{
	int status;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	status = __be_cmd_set_logical_link_config(adapter, link_state,
						  2, domain);

	/* Version 2 of the command will not be recognized by older FW.
	 * On such a failure issue version 1 of the command.
	 */
	if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST)
		status = __be_cmd_set_logical_link_config(adapter, link_state,
							  1, domain);
	return status;
}
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
		    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);

	*cmd_status = (status & 0xffff);
	*ext_status = 0;
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
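
/*
 * Usage sketch (illustrative only): a RoCE driver hands in a raw,
 * CPU-endian payload that already begins with a struct be_cmd_req_hdr;
 * this routine wraps it in an MCC WRB, handles endian conversion both
 * ways, and copies the response back over the payload buffer.  The
 * payload buffer below is hypothetical.
 *
 *	u16 cmd_status, ext_status;
 *
 *	status = be_roce_mcc_cmd(netdev, payload, sizeof(payload),
 *				 &cmd_status, &ext_status);
 */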