/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */
/* Must be a power of 2 or else MODULO will BUG_ON */
static int be_get_temp_freq = 32;
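/*
 * be_get_temp_freq is used as the divisor in MODULO(adapter->work_counter,
 * be_get_temp_freq) in be_cmd_get_stats() below; keeping it a power of 2
 * presumably lets MODULO() reduce to a cheap bit-mask. A sketch of the
 * equivalent test (assuming the power-of-2 invariant holds):
 *
 *	if ((adapter->work_counter & (be_get_temp_freq - 1)) == 0)
 *		be_cmd_get_die_temperature(adapter);
 */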
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (adapter->eeh_err) {
		dev_info(&adapter->pdev->dev,
			"Error in card detected! Cannot issue commands\n");
		return;
	}

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
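/*
 * Doorbell layout implied by the masks above: the low bits carry the MCC
 * ring id (DB_MCCQ_RING_ID_MASK) and the count of newly posted WRBs sits
 * at DB_MCCQ_NUM_POSTED_SHIFT, so a single 32-bit write both selects the
 * ring and tells the controller how many new entries to consume.
 */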
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	}
	return false;
}
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
	    (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
			struct be_cmd_resp_get_stats *resp =
						adapter->stats_cmd.va;
			be_dws_le_to_cpu(&resp->hw_stats,
						sizeof(resp->hw_stats));
			netdev_stats_update(adapter);
			adapter->stats_cmd_sent = false;
		}
	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
			"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	be_link_status_update(adapter,
		evt->port_link_status == ASYNC_EVENT_LINK_UP);
}
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	adapter->vlan_prio_bmap = evt->available_priority_bmap;
	adapter->recommended_prio &= ~VLAN_PRIO_MASK;
	adapter->recommended_prio |=
		evt->reco_default_priority << VLAN_PRIO_SHIFT;
}
/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (evt->physical_port == adapter->port_num) {
		/* qos_link_speed is in units of 10 Mbps */
		adapter->link_speed = evt->qos_link_speed * 10;
	}
}
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
		struct be_async_event_grp5_pvid_state *evt)
{
	adapter->pvid = le16_to_cpu(evt->tag);
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
			ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
			(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}
static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}
static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5);
}
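/*
 * Both helpers decode the same async "trailer" word: an event code field
 * at ASYNC_TRAILER_EVENT_CODE_SHIFT selects the event family (link state
 * vs. group 5), and for group 5 events a separate type field (see
 * be_async_grp5_evt_process above) selects the specific handler.
 */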
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
					(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
					compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			*status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}
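/*
 * Note the split in responsibilities above: async events (link state, grp5)
 * are dispatched to their handlers, while true command completions update
 * *status and release a queue slot. The return value is the number of
 * completions consumed, which callers feed back to be_cq_notify() when
 * re-arming the CQ (see be_mcc_wait_compl below).
 */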
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, num, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	if (adapter->eeh_err)
		return -EIO;

	for (i = 0; i < mcc_timeout; i++) {
		num = be_process_mcc(adapter, &status);
		if (num)
			be_cq_notify(adapter, mcc_obj->cq.id,
				mcc_obj->rearm_cq, num);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
		return -1;
	}
	return status;
}
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	if (adapter->eeh_err) {
		dev_err(&adapter->pdev->dev,
			"Error detected in card. Cannot issue commands\n");
		return -EIO;
	}

	do {
		ready = ioread32(db);
		if (ready == 0xffffffff) {
			dev_err(&adapter->pdev->dev,
				"pci slot disconnected\n");
			return -1;
		}

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
			be_detect_dump_ue(adapter);
			return -1;
		}

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1));
		msecs++;
	} while (true);

	return 0;
}
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem;

	if (lancer_chip(adapter))
		sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
	else
		sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}
int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
				stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(2 * HZ);
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 40);

	dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
				bool embedded, u8 sge_cnt, u32 opcode)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	wrb->tag0 = opcode;
	be_dws_cpu_to_le(wrb, 8);
}
/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
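/*
 * Example: a 10KB buffer at dma 0x1000 spans three 4K pages, so
 * PAGES_4K_SPANNED() yields 3 and the loop emits {lo,hi} pairs for
 * 0x1000, 0x2000 and 0x3000, each address split into little-endian
 * 32-bit halves for the descriptor format.
 */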
/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}
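/*
 * Worked example: usec_delay = 8 gives interrupt_rate = 125000, so
 * multiplier = (651042 - 125000) * 10 / 125000 = 42 (integer divide),
 * which then rounds to (42 + 5) / 10 = 4. Larger delays (lower rates)
 * grow the multiplier until it is clamped at 1023.
 */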
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	/* the special init pattern is stamped across the wrb bytes here */

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	/* the special clean pattern is stamped across the wrb bytes here */

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent)
		req->permanent = 1;
	else
		req->if_id = cpu_to_le16((u16) if_handle);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
								ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
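/*
 * Example: a 256-entry ring encodes as fls(256) = 9. A q_len of 32768
 * gives fls = 16, which is wrapped to 0 - presumably the hardware field
 * saturates there and uses 0 to denote the maximum ring size.
 */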
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE_EXT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}
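/*
 * 0x00000022 sets bits 1 and 5 of the bitmap, matching the comment above:
 * bit 1 subscribes this MCC queue to link state events and bit 5 to
 * group 5 events (the same event codes decoded by is_link_state_evt()
 * and is_grp5_evt()).
 */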
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_TX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
		sizeof(*req));

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
					adapter->if_handle);
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RX_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
		sizeof(*req));

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(max_frame_size);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
		u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	req->pmac_invalid = pmac_invalid;
	if (!pmac_invalid)
		memcpy(req->mac_addr, mac, ETH_ALEN);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (!pmac_invalid)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (adapter->eeh_err)
		return -EIO;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_INTERFACE_DESTROY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_stats *req;
	struct be_sge *sge;
	int status = 0;

	if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_GET_STATISTICS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
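/*
 * Because this command is fired asynchronously (be_mcc_notify() with no
 * wait), the response is consumed later in be_mcc_compl_process(): on an
 * OPCODE_ETH_GET_STATISTICS completion the stats block is byte-swapped
 * in place and stats_cmd_sent is cleared.
 */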
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
			bool *link_up, u8 *mac_speed, u16 *link_speed)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	*link_up = false;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			*link_up = true;
			*link_speed = le16_to_cpu(resp->link_speed);
			*mac_speed = resp->mac_speed;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses synchronous mcc */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_cntl_addnl_attribs *resp =
						embedded_payload(wrb);
		adapter->drv_stats.be_on_die_temperature =
						resp->on_die_temperature;
	} else {
		/* If IOCTL fails once, do not bother issuing it again */
		be_get_temp_freq = 0;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FW_VERSION);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
/* Set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MODIFY_EQ_DELAY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_VLAN_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_promiscuous_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_PROMISCUOUS, sizeof(*req));

	/* In FW versions X.102.149/X.101.487 and later,
	 * the port setting associated only with the
	 * issuing pci function will take effect
	 */
	if (port_num)
		req->port1_promiscuous = en;
	else
		req->port0_promiscuous = en;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/*
 * Uses MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
		struct net_device *netdev, struct be_dma_mem *mem)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcast_mac_config *req = mem->va;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	sge = nonembedded_sgl(wrb);
	memset(req, 0, sizeof(*req));

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_NTWK_MULTICAST_SET);
	sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
	sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(mem->size);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

	req->interface_id = if_id;
	if (netdev) {
		int i;
		struct netdev_hw_addr *ha;

		req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

		i = 0;
		netdev_for_each_mc_addr(ha, netdev)
			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
	} else {
		req->promiscuous = 1;
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_FLOW_CONTROL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_FUNCTION_RESET);

	be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_ETH_RSS_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req));

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
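/*
 * cpu_table_size_log2 carries the redirection table size as a log2 value,
 * e.g. a 128-entry table is sent as fls(128) - 1 = 7.
 */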
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_ENABLE_DISABLE_BEACON);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_GET_BEACON_STATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
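/*
 * The flash write completes out of band: be_mcc_compl_process() matches
 * completions whose tag0 is OPCODE_COMMON_WRITE_FLASHROM and tag1 is
 * CMD_SUBSYSTEM_COMMON (set on the wrb above), records flash_status and
 * fires flash_compl, which is what the 12s wait here blocks on.
 */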
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
			OPCODE_COMMON_READ_FLASHROM);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
	memcpy(req->magic_mac, mac, ETH_ALEN);

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
			sizeof(*req));

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_LOWLEVEL_LOOPBACK_TEST);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
			u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	struct be_sge *sge;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	sge = nonembedded_sgl(wrb);
	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_LOWLEVEL_HOST_DDR_DMA);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j = (j + 1) % 8;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_SEEPROM_READ);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
			OPCODE_COMMON_GET_PHY_DETAILS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_PHY_DETAILS,
			sizeof(*req));

	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_SET_QOS);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req));

	req->hdr.domain = domain;
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	struct be_sge *sge;
	int status;
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;

	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
						&attribs_cmd.dma);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure\n");
		return -ENOMEM;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = attribs_cmd.va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, payload_len, false, 1,
			OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
	sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
	sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(attribs_cmd.size);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		attribs = (struct mgmt_controller_attrib *)(attribs_cmd.va +
					sizeof(struct be_cmd_resp_hdr));
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
	}

	mutex_unlock(&adapter->mbox_lock);
	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
					attribs_cmd.dma);
	return status;
}
int be_cmd_check_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
	}

err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}