/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"

/* Must be a power of 2 or else MODULO will BUG_ON */
static int be_get_temp_freq = 64;

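/* Editorial note: be_cmd_get_stats() below issues the die-temperature query
 * once every be_get_temp_freq invocations via
 * MODULO(adapter->work_counter, be_get_temp_freq); the MCC completion
 * handler zeroes this frequency to stop further queries when the command
 * fails.
 */
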
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
        return wrb->payload.embedded_payload;
}

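/* Editorial note: the routine below rings the MCCQ doorbell; the low bits
 * carry the MCCQ ring id and DB_MCCQ_NUM_POSTED_SHIFT positions the count
 * of newly posted WRBs (one per call here). The doorbell write is skipped
 * entirely once an EEH error has been detected on the card.
 */
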
static void be_mcc_notify(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;

        if (adapter->eeh_err) {
                dev_info(&adapter->pdev->dev,
                        "Error in Card Detected! Cannot issue commands\n");
                return;
        }

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
                return true;
        } else {
                return false;
        }
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}

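/* Editorial note: tag0/tag1 are opaque copies of the corresponding WRB
 * fields: tag0 holds the opcode and tag1 the subsystem (or, for the
 * die-temperature query, the MCCQ index of the originating WRB). That is
 * how flash, stats and temperature completions are recognized below.
 * Returns the base completion status.
 */
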
static int be_mcc_compl_process(struct be_adapter *adapter,
        struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                CQE_STATUS_COMPL_MASK;

        if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
                (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
                (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
                adapter->flash_status = compl_status;
                complete(&adapter->flash_compl);
        }

        if (compl_status == MCC_STATUS_SUCCESS) {
                if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
                        (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
                        (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
                        be_parse_stats(adapter);
                        adapter->stats_cmd_sent = false;
                }
                if (compl->tag0 ==
                                OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
                        struct be_mcc_wrb *mcc_wrb =
                                queue_index_node(&adapter->mcc_obj.q,
                                                compl->tag1);
                        struct be_cmd_resp_get_cntl_addnl_attribs *resp =
                                embedded_payload(mcc_wrb);
                        adapter->drv_stats.be_on_die_temperature =
                                resp->on_die_temperature;
                }
        } else {
                if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
                        be_get_temp_freq = 0;

                if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
                        compl_status == MCC_STATUS_ILLEGAL_REQUEST)
                        goto done;

                if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
                        dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
                                "permitted to execute this cmd (opcode %d)\n",
                                compl->tag0);
                } else {
                        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                        CQE_STATUS_EXTD_MASK;
                        dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed: "
                                "status %d, extd-status %d\n",
                                compl->tag0, compl_status, extd_status);
                }
        }
done:
        return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
                struct be_async_event_link_state *evt)
{
        be_link_status_update(adapter, evt->port_link_status);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
                struct be_async_event_grp5_cos_priority *evt)
{
        if (evt->valid) {
                adapter->vlan_prio_bmap = evt->available_priority_bmap;
                adapter->recommended_prio &= ~VLAN_PRIO_MASK;
                adapter->recommended_prio =
                        evt->reco_default_priority << VLAN_PRIO_SHIFT;
        }
}

/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
                struct be_async_event_grp5_qos_link_speed *evt)
{
        if (evt->physical_port == adapter->port_num) {
                /* qos_link_speed is in units of 10 Mbps */
                adapter->link_speed = evt->qos_link_speed * 10;
        }
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
                struct be_async_event_grp5_pvid_state *evt)
{
        if (evt->enabled)
                adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
        else
                adapter->pvid = 0;
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
                u32 trailer, struct be_mcc_compl *evt)
{
        u8 event_type = 0;

        event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
                ASYNC_TRAILER_EVENT_TYPE_MASK;

        switch (event_type) {
        case ASYNC_EVENT_COS_PRIORITY:
                be_async_grp5_cos_priority_process(adapter,
                        (struct be_async_event_grp5_cos_priority *)evt);
                break;
        case ASYNC_EVENT_QOS_SPEED:
                be_async_grp5_qos_speed_process(adapter,
                        (struct be_async_event_grp5_qos_link_speed *)evt);
                break;
        case ASYNC_EVENT_PVID_STATE:
                be_async_grp5_pvid_state_process(adapter,
                        (struct be_async_event_grp5_pvid_state *)evt);
                break;
        default:
                dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
                break;
        }
}

static inline bool is_link_state_evt(u32 trailer)
{
        return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                ASYNC_TRAILER_EVENT_CODE_MASK) ==
                                ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                ASYNC_TRAILER_EVENT_CODE_MASK) ==
                                ASYNC_EVENT_CODE_GRP_5);
}

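/* Illustration: an async CQE arrives with its flags word doubling as the
 * event trailer. If the event-code bits decode to ASYNC_EVENT_CODE_GRP_5
 * and the event-type bits to ASYNC_EVENT_PVID_STATE, be_process_mcc()
 * below hands the entry to be_async_grp5_pvid_state_process().
 */
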
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
        struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
        spin_lock_bh(&adapter->mcc_cq_lock);

        be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
        adapter->mcc_obj.rearm_cq = true;

        spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
        adapter->mcc_obj.rearm_cq = false;
}

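/* Editorial note: the routine below drains the MCC completion queue.
 * Async entries (CQE_FLAGS_ASYNC_MASK) are dispatched to the event
 * handlers above, command completions are fed to be_mcc_compl_process()
 * and their WRB slot is released. Returns the number of CQEs consumed so
 * the caller can re-arm the CQ.
 */
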
int be_process_mcc(struct be_adapter *adapter, int *status)
{
        struct be_mcc_compl *compl;
        int num = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        spin_lock_bh(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        if (is_link_state_evt(compl->flags))
                                be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                        else if (is_grp5_evt(compl->flags))
                                be_async_grp5_evt_process(adapter,
                                        compl->flags, compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                        *status = be_mcc_compl_process(adapter, compl);
                        atomic_dec(&mcc_obj->q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }

        spin_unlock_bh(&adapter->mcc_cq_lock);
        return num;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout             120000 /* 12s timeout */
        int i, num, status = 0;
        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

        if (adapter->eeh_err)
                return -EIO;

        for (i = 0; i < mcc_timeout; i++) {
                num = be_process_mcc(adapter, &status);
                if (num)
                        be_cq_notify(adapter, mcc_obj->cq.id,
                                mcc_obj->rearm_cq, num);

                if (atomic_read(&mcc_obj->q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout) {
                dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
                return -1;
        }
        return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
        be_mcc_notify(adapter);
        return be_mcc_wait_compl(adapter);
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
        int msecs = 0;
        u32 ready;

        if (adapter->eeh_err) {
                dev_err(&adapter->pdev->dev,
                        "Error detected in card. Cannot issue commands\n");
                return -EIO;
        }

        do {
                ready = ioread32(db);
                if (ready == 0xffffffff) {
                        dev_err(&adapter->pdev->dev,
                                "pci slot disconnected\n");
                        return -1;
                }

                ready &= MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (msecs > 4000) {
                        dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
                        if (!lancer_chip(adapter))
                                be_detect_dump_ue(adapter);
                        return -1;
                }

                msleep(1);
                msecs++;
        } while (true);

        return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
        int status;
        u32 val = 0;
        void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(adapter, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
        u32 sem;

        if (lancer_chip(adapter))
                sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
        else
                sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

        *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
        if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
                return -1;
        else
                return 0;
}

int be_cmd_POST(struct be_adapter *adapter)
{
        u16 stage;
        int status, timeout = 0;
        struct device *dev = &adapter->pdev->dev;

        do {
                status = be_POST_stage_get(adapter, &stage);
                if (status) {
                        dev_err(dev, "POST error; stage=0x%x\n", stage);
                        return -1;
                } else if (stage != POST_STAGE_ARMFW_RDY) {
                        if (msleep_interruptible(2000)) {
                                dev_err(dev, "Waiting for POST aborted\n");
                                return -EINTR;
                        }
                        timeout += 2;
                } else {
                        return 0;
                }
        } while (timeout < 60);

        dev_err(dev, "POST timeout; stage=0x%x\n", stage);
        return -1;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
        return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                                bool embedded, u8 sge_cnt, u32 opcode)
{
        if (embedded)
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        else
                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        wrb->tag0 = opcode;
        be_dws_cpu_to_le(wrb, 8);
}

/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                                u8 subsystem, u8 opcode, int cmd_len)
{
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
        req_hdr->version = 0;
}

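/* Editorial note: every command routine below follows the same pattern:
 * grab a WRB via wrb_from_mbox() (bootstrap path, under mbox_lock) or
 * wrb_from_mccq() (normal path, under mcc_lock), stamp it with
 * be_wrb_hdr_prepare(), fill the request header with be_cmd_hdr_prepare(),
 * populate the payload, and then issue it with be_mbox_notify_wait(),
 * be_mcc_notify_wait() or be_mcc_notify().
 */
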
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                        struct be_dma_mem *mem)
{
        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        u64 dma = (u64)mem->dma;

        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
                dma += PAGE_SIZE_4K;
        }
}

/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE                   651042
        const u32 round = 10;
        u32 multiplier;

        if (usec_delay == 0)
                multiplier = 0;
        else {
                u32 interrupt_rate = 1000000 / usec_delay;
                /* Max delay, corresponding to the lowest interrupt rate */
                if (interrupt_rate == 0)
                        multiplier = 1023;
                else {
                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
                        multiplier /= interrupt_rate;
                        /* Round the multiplier to the closest value.*/
                        multiplier = (multiplier + round/2) / round;
                        multiplier = min(multiplier, (u32)1023);
                }
        }
        return multiplier;
}

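/* Worked example for eq_delay_to_mult(): usec_delay = 96 gives
 * interrupt_rate = 1000000/96 = 10416; (651042 - 10416) * 10 / 10416 = 615,
 * which rounds to a multiplier of 62 (well under the 1023 ceiling).
 */
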
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_wrb *wrb
                = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        struct be_mcc_wrb *wrb;

        if (atomic_read(&mccq->used) >= mccq->len) {
                dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
                return NULL;
        }

        wrb = queue_head_node(mccq);
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}

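/* Editorial note: callers must check for a NULL return from
 * wrb_from_mccq(): when the MCCQ ring is full the routines below fail the
 * command with -EBUSY rather than blocking.
 */
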
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
        u8 *wrb;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = (u8 *)wrb_from_mbox(adapter);
        *wrb++ = 0xFF;
        *wrb++ = 0x12;
        *wrb++ = 0x34;
        *wrb++ = 0xFF;
        *wrb++ = 0xFF;
        *wrb++ = 0x56;
        *wrb++ = 0x78;
        *wrb = 0xFF;

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
        u8 *wrb;
        int status;

        if (adapter->eeh_err)
                return -EIO;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = (u8 *)wrb_from_mbox(adapter);
        *wrb++ = 0xFF;
        *wrb++ = 0xAA;
        *wrb++ = 0xBB;
        *wrb++ = 0xFF;
        *wrb++ = 0xFF;
        *wrb++ = 0xCC;
        *wrb++ = 0xDD;
        *wrb = 0xFF;

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

int be_cmd_eq_create(struct be_adapter *adapter,
                struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eq_create *req;
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                        __ilog2_u32(eq->len/256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                        eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                        u8 type, bool permanent, u32 if_handle)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_MAC_QUERY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

        req->type = type;
        if (permanent) {
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16) if_handle);
                req->permanent = 0;
        }

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                u32 if_id, u32 *pmac_id, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_PMAC_ADD);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

        req->hdr.domain = domain;
        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_del *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_PMAC_DEL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

        req->hdr.domain = dom;
        req->if_id = cpu_to_le32(if_id);
        req->pmac_id = cpu_to_le32(pmac_id);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_cq_create(struct be_adapter *adapter,
                struct be_queue_info *cq, struct be_queue_info *eq,
                bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_CQ_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_CQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (lancer_chip(adapter)) {
                req->hdr.version = 2;
                req->page_size = 1; /* 1 for 4K */
                AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
                                                                no_delay);
                AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
                                                __ilog2_u32(cq->len/256));
                AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
                                                                ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
                                                                ctxt, eq->id);
                AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
        } else {
                AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
                                                                coalesce_wm);
                AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
                                                                ctxt, no_delay);
                AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
                                                __ilog2_u32(cq->len/256));
                AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, solevent,
                                                                ctxt, sol_evts);
                AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
                AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
                AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
        }

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len); /* log2(len) + 1 */
        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}

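/* e.g. a 256-entry queue encodes as fls(256) = 9, while a 32768-entry
 * queue gives fls(32768) = 16, which the hardware expects encoded as 0.
 */
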
int be_cmd_mccq_ext_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_ext_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MCC_CREATE_EXT);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
                req->cq_id = cpu_to_le16(cq->id);

                AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
                                                be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
                                                                ctxt, cq->id);
                AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
                                                                ctxt, 1);
        } else {
                AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                                                be_encoded_q_len(mccq->len));
                AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
        }

        /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
        req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        mutex_unlock(&adapter->mbox_lock);

        return status;
}

int be_cmd_mccq_org_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MCC_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
                                        be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        int status;

        status = be_cmd_mccq_ext_create(adapter, mccq, cq);
        if (status && !lancer_chip(adapter)) {
                dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
                        "or newer to avoid conflicting priorities between NIC "
                        "and FCoE traffic");
                status = be_cmd_mccq_org_create(adapter, mccq, cq);
        }
        return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
                        struct be_queue_info *txq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_tx_create *req;
        struct be_dma_mem *q_mem = &txq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_ETH_TX_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));

        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
                AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
                                        adapter->if_handle);
        }

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;

        AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
                be_encoded_q_len(txq->len));
        AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);

        return status;
}

int be_cmd_rxq_create(struct be_adapter *adapter,
                struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
                u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_rx_create *req;
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_ETH_RX_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
                sizeof(*req));

        req->cq_id = cpu_to_le16(cq_id);
        req->frag_size = fls(frag_size) - 1;
        req->num_pages = 2;
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
        req->interface_id = cpu_to_le32(if_id);
        req->max_frame_size = cpu_to_le16(max_frame_size);
        req->rss_queue = cpu_to_le32(rss);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
                *rss_id = resp->rss_id;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                int queue_type)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        u8 subsys = 0, opcode = 0;
        int status;

        if (adapter->eeh_err)
                return -EIO;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();
        }

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify_wait(adapter);
        if (!status)
                q->created = false;

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_RX_DESTROY);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY,
                sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mcc_notify_wait(adapter);
        if (!status)
                q->created = false;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
                u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_create *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_INTERFACE_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

        req->hdr.domain = domain;
        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);
        if (mac)
                memcpy(req->mac_addr, mac, ETH_ALEN);
        else
                req->pmac_invalid = true;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
                if (mac)
                        *pmac_id = le32_to_cpu(resp->pmac_id);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_destroy *req;
        int status;

        if (adapter->eeh_err)
                return -EIO;

        if (!interface_id)
                return 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

        req->hdr.domain = domain;
        req->interface_id = cpu_to_le32(interface_id);

        status = be_mcc_notify_wait(adapter);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *hdr;
        struct be_sge *sge;
        int status = 0;

        if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        hdr = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
                        OPCODE_ETH_GET_STATISTICS);

        be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);

        if (adapter->generation == BE_GEN3)
                hdr->version = 1;

        wrb->tag1 = CMD_SUBSYSTEM_ETH;
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

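/* Editorial note: the stats command above completes asynchronously:
 * be_mcc_compl_process() calls be_parse_stats() and clears stats_cmd_sent
 * when the completion arrives, so no result is returned here.
 */
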
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
                                struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_pport_stats *req;
        struct be_sge *sge;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
                        OPCODE_ETH_GET_PPORT_STATS);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                        OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);

        req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
        req->cmd_params.params.reset_stats = 0;

        wrb->tag1 = CMD_SUBSYSTEM_ETH;
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
                        u16 *link_speed, u32 dom)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
                        *link_speed = le16_to_cpu(resp->link_speed);
                        if (mac_speed)
                                *mac_speed = resp->mac_speed;
                }
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses async mcc; the completion handler records the temperature */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_cntl_addnl_attribs *req;
        u16 mccq_index;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        mccq_index = adapter->mcc_obj.q.head;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));

        wrb->tag1 = mccq_index;

        be_mcc_notify(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fat *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MANAGE_FAT);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
        req->fat_operation = cpu_to_le32(QUERY_FAT);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
                if (log_size && resp->log_size)
                        *log_size = le32_to_cpu(resp->log_size) -
                                        sizeof(u32);
        }
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
        struct be_dma_mem get_fat_cmd;
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fat *req;
        struct be_sge *sge;
        u32 offset = 0, total_size, buf_size,
                log_offset = sizeof(u32), payload_len;
        int status;

        if (buf_len == 0)
                return;

        total_size = buf_len;

        get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
        get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
                        get_fat_cmd.size,
                        &get_fat_cmd.dma);
        if (!get_fat_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                "Memory allocation failure while retrieving FAT data\n");
                return;
        }

        spin_lock_bh(&adapter->mcc_lock);

        while (total_size) {
                buf_size = min(total_size, (u32)60*1024);
                total_size -= buf_size;

                wrb = wrb_from_mccq(adapter);
                if (!wrb) {
                        status = -EBUSY;
                        goto err;
                }
                req = get_fat_cmd.va;
                sge = nonembedded_sgl(wrb);

                payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
                be_wrb_hdr_prepare(wrb, payload_len, false, 1,
                                OPCODE_COMMON_MANAGE_FAT);

                be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                                OPCODE_COMMON_MANAGE_FAT, payload_len);

                sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
                sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
                sge->len = cpu_to_le32(get_fat_cmd.size);

                req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
                req->read_log_offset = cpu_to_le32(log_offset);
                req->read_log_length = cpu_to_le32(buf_size);
                req->data_buffer_size = cpu_to_le32(buf_size);

                status = be_mcc_notify_wait(adapter);
                if (!status) {
                        struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
                        memcpy(buf + offset,
                                resp->data_buffer,
                                le32_to_cpu(resp->read_log_length));
                } else {
                        dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
                        goto err;
                }
                offset += buf_size;
                log_offset += buf_size;
        }
err:
        pci_free_consistent(adapter->pdev, get_fat_cmd.size,
                        get_fat_cmd.va,
                        get_fat_cmd.dma);
        spin_unlock_bh(&adapter->mcc_lock);
}

/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
                        char *fw_on_flash)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fw_version *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_FW_VERSION);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
                strcpy(fw_ver, resp->firmware_version_string);
                if (fw_on_flash)
                        strcpy(fw_on_flash, resp->fw_on_flash_version_string);
        }
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_modify_eq_delay *req;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MODIFY_EQ_DELAY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

        req->num_eq = cpu_to_le32(1);
        req->delay[0].eq_id = cpu_to_le32(eq_id);
        req->delay[0].phase = 0;
        req->delay[0].delay_multiplier = cpu_to_le32(eqd);

        be_mcc_notify(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                        u32 num, bool untagged, bool promiscuous)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_VLAN_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        req->untagged = untagged;
        req->num_vlan = num;
        if (!promiscuous) {
                memcpy(req->normal_vlan, vtag_array,
                        req->num_vlan * sizeof(vtag_array[0]));
        }

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
        struct be_mcc_wrb *wrb;
        struct be_dma_mem *mem = &adapter->rx_filter;
        struct be_cmd_req_rx_filter *req = mem->va;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        sge = nonembedded_sgl(wrb);
        sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
        sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(mem->size);
        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                                OPCODE_COMMON_NTWK_RX_FILTER);

        memset(req, 0, sizeof(*req));
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                                OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));

        req->if_id = cpu_to_le32(adapter->if_handle);
        if (flags & IFF_PROMISC) {
                req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
                                        BE_IF_FLAGS_VLAN_PROMISCUOUS);
                if (value == ON)
                        req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
                                                BE_IF_FLAGS_VLAN_PROMISCUOUS);
        } else if (flags & IFF_ALLMULTI) {
                req->if_flags_mask = req->if_flags =
                                cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
        } else {
                struct netdev_hw_addr *ha;
                int i = 0;

                req->if_flags_mask = req->if_flags =
                                cpu_to_le32(BE_IF_FLAGS_MULTICAST);
                req->mcast_num = cpu_to_le16(netdev_mc_count(adapter->netdev));
                netdev_for_each_mc_addr(ha, adapter->netdev)
                        memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
        }

        status = be_mcc_notify_wait(adapter);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_SET_FLOW_CONTROL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
        req->rx_flow_control = cpu_to_le16((u16)rx_fc);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_FLOW_CONTROL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_flow_control *resp =
                                                embedded_payload(wrb);
                *tx_fc = le16_to_cpu(resp->tx_flow_control);
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
                u32 *mode, u32 *caps)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
                *mode = le32_to_cpu(resp->function_mode);
                *caps = le32_to_cpu(resp->function_caps);
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

int be_cmd_reset_function(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_FUNCTION_RESET);

        be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rss_config *req;
        u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
                        0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_ETH_RSS_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_RSS_CONFIG, sizeof(*req));

        req->if_id = cpu_to_le32(adapter->if_handle);
        req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
        req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
        memcpy(req->cpu_table, rsstable, table_size);
        memcpy(req->hash, myhash, sizeof(myhash));
        be_dws_cpu_to_le(req->hash, sizeof(req->hash));

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
                        u8 bcn, u8 sts, u8 state)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_enable_disable_beacon *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_ENABLE_DISABLE_BEACON);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

        req->port_num = port_num;
        req->beacon_state = state;
        req->beacon_duration = bcn;
        req->status_duration = sts;

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_beacon_state *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_BEACON_STATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

        req->port_num = port_num;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_beacon_state *resp =
                                                embedded_payload(wrb);
                *state = resp->beacon_state;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
                u32 data_size, u32 data_offset, const char *obj_name,
                u32 *data_written, u8 *addn_status)
{
        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_write_object *req;
        struct lancer_cmd_resp_write_object *resp;
        void *ctxt = NULL;
        int status;

        spin_lock_bh(&adapter->mcc_lock);
        adapter->flash_status = 0;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(struct lancer_cmd_req_write_object),
                        true, 1, OPCODE_COMMON_WRITE_OBJECT);
        wrb->tag1 = CMD_SUBSYSTEM_COMMON;

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                                OPCODE_COMMON_WRITE_OBJECT,
                                sizeof(struct lancer_cmd_req_write_object));

        ctxt = &req->context;
        AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                        write_length, ctxt, data_size);

        if (data_size == 0)
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                                eof, ctxt, 1);
        else
                AMAP_SET_BITS(struct amap_lancer_write_obj_context,
                                eof, ctxt, 0);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));
        req->write_offset = cpu_to_le32(data_offset);
        strcpy(req->object_name, obj_name);
        req->descriptor_count = cpu_to_le32(1);
        req->buf_len = cpu_to_le32(data_size);
        req->addr_low = cpu_to_le32((cmd->dma +
                        sizeof(struct lancer_cmd_req_write_object))
                        & 0xFFFFFFFF);
        req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
                        sizeof(struct lancer_cmd_req_write_object)));

        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);

        if (!wait_for_completion_timeout(&adapter->flash_compl,
                        msecs_to_jiffies(12000)))
                status = -1;
        else
                status = adapter->flash_status;

        resp = embedded_payload(wrb);
        if (!status) {
                *data_written = le32_to_cpu(resp->actual_write_len);
        } else {
                *addn_status = resp->additional_status;
                status = resp->status;
        }

        return status;

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                u32 flash_type, u32 flash_opcode, u32 buf_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);
        adapter->flash_status = 0;

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }
        req = cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
                        OPCODE_COMMON_WRITE_FLASHROM);
        wrb->tag1 = CMD_SUBSYSTEM_COMMON;

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd->size);

        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);

        be_mcc_notify(adapter);
        spin_unlock_bh(&adapter->mcc_lock);

        if (!wait_for_completion_timeout(&adapter->flash_compl,
                        msecs_to_jiffies(40000)))
                status = -1;
        else
                status = adapter->flash_status;

        return status;

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

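/* Editorial note: flash writes are signalled through adapter->flash_compl:
 * be_mcc_compl_process() stores the result in adapter->flash_status and
 * calls complete(), which the waiters above consume.
 */
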
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
                         int offset)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
                        OPCODE_COMMON_READ_FLASHROM);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

        req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
        req->params.offset = cpu_to_le32(offset);
        req->params.data_buf_size = cpu_to_le32(0x4);

        status = be_mcc_notify_wait(adapter);
        if (!status)
                memcpy(flashed_crc, req->params.data_buf, 4);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
                        struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_acpi_wol_magic_config *req;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
        memcpy(req->magic_mac, mac, ETH_ALEN);

        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
                        u8 loopback_type, u8 enable)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_lmode *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
                        sizeof(*req));

        req->src_port = port_num;
        req->dest_port = port_num;
        req->loopback_type = loopback_type;
        req->loopback_state = enable;

        status = be_mcc_notify_wait(adapter);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
                u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_loopback_test *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_LOWLEVEL_LOOPBACK_TEST);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
        req->hdr.timeout = cpu_to_le32(4);

        req->pattern = cpu_to_le64(pattern);
        req->src_port = cpu_to_le32(port_num);
        req->dest_port = cpu_to_le32(port_num);
        req->pkt_size = cpu_to_le32(pkt_size);
        req->num_pkts = cpu_to_le32(num_pkts);
        req->loopback_type = cpu_to_le32(loopback_type);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
                status = le32_to_cpu(resp->status);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
                u32 byte_cnt, struct be_dma_mem *cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_ddrdma_test *req;
        struct be_sge *sge;
        int status;
        int i, j = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = cmd->va;
        sge = nonembedded_sgl(wrb);
        be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
                        OPCODE_LOWLEVEL_HOST_DDR_DMA);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd->size);

        req->pattern = cpu_to_le64(pattern);
        req->byte_count = cpu_to_le32(byte_cnt);
        for (i = 0; i < byte_cnt; i++) {
                req->snd_buff[i] = (u8)(pattern >> (j*8));
                j++;
                if (j > 7)
                        j = 0;
        }

        status = be_mcc_notify_wait(adapter);

        if (!status) {
                struct be_cmd_resp_ddrdma_test *resp;
                resp = cmd->va;
                if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
                                resp->snd_err) {
                        status = -1;
                }
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_get_seeprom_data(struct be_adapter *adapter,
                                struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_seeprom_read *req;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_COMMON_SEEPROM_READ);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_get_phy_info(struct be_adapter *adapter,
                                struct be_phy_info *phy_info)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_phy_info *req;
        struct be_sge *sge;
        struct be_dma_mem cmd;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        cmd.size = sizeof(struct be_cmd_req_get_phy_info);
        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
                                        &cmd.dma);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
                goto err;
        }

        req = cmd.va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_COMMON_GET_PHY_DETAILS);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_GET_PHY_DETAILS,
                        sizeof(*req));

        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd.dma));
        sge->pa_lo = cpu_to_le32(cmd.dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd.size);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_phy_info *resp_phy_info =
                                cmd.va + sizeof(struct be_cmd_req_hdr);
                phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
                phy_info->interface_type =
                        le16_to_cpu(resp_phy_info->interface_type);
        }
        pci_free_consistent(adapter->pdev, cmd.size,
                                cmd.va, cmd.dma);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_qos *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_SET_QOS);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_SET_QOS, sizeof(*req));

        req->hdr.domain = domain;
        req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
        req->max_bps_nic = cpu_to_le32(bps);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cntl_attribs *req;
        struct be_cmd_resp_cntl_attribs *resp;
        struct be_sge *sge;
        int status;
        int payload_len = max(sizeof(*req), sizeof(*resp));
        struct mgmt_controller_attrib *attribs;
        struct be_dma_mem attribs_cmd;

        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
        attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
                                                &attribs_cmd.dma);
        if (!attribs_cmd.va) {
                dev_err(&adapter->pdev->dev,
                                "Memory allocation failure\n");
                return -ENOMEM;
        }

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = attribs_cmd.va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, payload_len, false, 1,
                        OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
        sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
        sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(attribs_cmd.size);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
                adapter->hba_port_num = attribs->hba_attribs.phy_port;
        }

err:
        mutex_unlock(&adapter->mbox_lock);
        pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
                                        attribs_cmd.dma);
        return status;
}

int be_cmd_req_native_mode(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_func_cap *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));

        req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
                                CAPABILITY_BE3_NATIVE_ERX_API);
        req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
                adapter->be3_native = le32_to_cpu(resp->cap_flags) &
                                        CAPABILITY_BE3_NATIVE_ERX_API;
        }
err:
        mutex_unlock(&adapter->mbox_lock);
        return status;
}