/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
static void be_mcc_notify(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
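
/* Illustrative doorbell encoding (example values, not from the source): with
 * mccq->id == 5 and one newly posted WRB, the word written above is
 * (5 & DB_MCCQ_RING_ID_MASK) | (1 << DB_MCCQ_NUM_POSTED_SHIFT): the ring id
 * in the low bits selects which MCC queue the post count applies to.
 */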
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
                return true;
        } else {
                return false;
        }
}
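
/* Why "flags != 0" is safe either way: a consumed entry has its flags word
 * zeroed by be_mcc_compl_use below, and zero is zero in any byte order; a
 * fresh entry written by fw carries the little-endian valid bit, which is
 * nonzero however the host reads it. Only after this test is the word
 * byte-swapped once into host order.
 */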
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}
static int be_mcc_compl_process(struct be_adapter *adapter,
        struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                CQE_STATUS_COMPL_MASK;
        if (compl_status == MCC_STATUS_SUCCESS) {
                if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
                        struct be_cmd_resp_get_stats *resp =
                                                adapter->stats.cmd.va;
                        be_dws_le_to_cpu(&resp->hw_stats,
                                                sizeof(resp->hw_stats));
                        netdev_stats_update(adapter);
                }
        } else if (compl_status != MCC_STATUS_NOT_SUPPORTED) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                CQE_STATUS_EXTD_MASK;
                dev_warn(&adapter->pdev->dev,
                        "Error in cmd completion: status(compl/extd)=%d/%d\n",
                        compl_status, extd_status);
        }
        return compl_status;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
                struct be_async_event_link_state *evt)
{
        be_link_status_update(adapter,
                evt->port_link_status == ASYNC_EVENT_LINK_UP);
}
static inline bool is_link_state_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                ASYNC_TRAILER_EVENT_CODE_MASK) ==
                        ASYNC_EVENT_CODE_LINK_STATE);
}
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
        struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}
int be_process_mcc(struct be_adapter *adapter)
{
        struct be_mcc_compl *compl;
        int num = 0, status = 0;

        spin_lock_bh(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        BUG_ON(!is_link_state_evt(compl->flags));

                        /* Interpret compl as an async link evt */
                        be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                        status = be_mcc_compl_process(adapter, compl);
                        atomic_dec(&adapter->mcc_obj.q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }

        if (num)
                be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);

        spin_unlock_bh(&adapter->mcc_cq_lock);
        return status;
}
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout     120000 /* 12s timeout */
        int i, status;

        for (i = 0; i < mcc_timeout; i++) {
                status = be_process_mcc(adapter);
                if (status)
                        return status;

                if (atomic_read(&adapter->mcc_obj.q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout) {
                dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
                return -1;
        }
        return 0;
}
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
        be_mcc_notify(adapter);
        return be_mcc_wait_compl(adapter);
}
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
        int cnt = 0, wait = 5;
        u32 ready;

        do {
                ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (cnt > 200000) {
                        dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
                        return -1;
                }

                if (cnt > 50)
                        wait = 200;
                cnt += wait;
                udelay(wait);
        } while (true);

        return 0;
}
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
        int status;
        u32 val = 0;
        void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(adapter, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
        u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

        *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
        if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
                return -1;
        else
                return 0;
}
int be_cmd_POST(struct be_adapter *adapter)
{
        u16 stage, error;

        error = be_POST_stage_get(adapter, &stage);
        if (error || stage != POST_STAGE_ARMFW_RDY) {
                dev_err(&adapter->pdev->dev, "POST failed.\n");
                return -1;
        }

        return 0;
}
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
        return wrb->payload.embedded_payload;
}
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
        return &wrb->payload.sgl[0];
}
/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                                bool embedded, u8 sge_cnt)
{
        if (embedded)
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        else
                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        be_dws_cpu_to_le(wrb, 20);
}
/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                                u8 subsystem, u8 opcode, int cmd_len)
{
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                        struct be_dma_mem *mem)
{
        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        u64 dma = (u64)mem->dma;

        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
                dma += PAGE_SIZE_4K;
        }
}
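
/* Example (assumed buffer, for illustration): a 10KB ring at dma 0x10000
 * spans three 4K pages, so pages[0..2].lo become 0x10000, 0x11000, 0x12000
 * (as le32) with .hi zero; the advancing dma cursor is what gives each
 * entry its own page address.
 */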
/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE   651042
        const u32 round = 10;
        u32 multiplier;

        if (usec_delay == 0)
                multiplier = 0;
        else {
                u32 interrupt_rate = 1000000 / usec_delay;
                /* Max delay, corresponding to the lowest interrupt rate */
                if (interrupt_rate == 0)
                        multiplier = 1023;
                else {
                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
                        multiplier /= interrupt_rate;
                        /* Round the multiplier to the closest value. */
                        multiplier = (multiplier + round / 2) / round;
                        multiplier = min(multiplier, (u32)1023);
                }
        }
        return multiplier;
}
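
/* Worked example: usec_delay = 8 gives interrupt_rate = 125000, so
 * multiplier = (651042 - 125000) * 10 / 125000 = 42, then rounded as
 * (42 + 5) / 10 = 4. That is (MAX_INTR_RATE / rate) - 1 = 4.2 rounded to
 * the nearest encodable step and capped at 1023.
 */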
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_wrb *wrb
                = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        struct be_mcc_wrb *wrb;

        BUG_ON(atomic_read(&mccq->used) >= mccq->len);
        wrb = queue_head_node(mccq);
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
        memset(wrb, 0, sizeof(*wrb));
        return wrb;
}
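
/* Note on the two WRB sources above: wrb_from_mbox hands out the single
 * bootstrap mailbox slot (callers serialize with mbox_lock and poll
 * synchronously), while wrb_from_mccq consumes ring entries under mcc_lock
 * once the MCC queue exists; the per-command comments below ("Uses
 * synchronous MCCQ", "Uses asynchronous MCC") record which path a command
 * takes.
 */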
int be_cmd_eq_create(struct be_adapter *adapter,
                struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eq_create *req;
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, func, req->context,
                        be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        /* 4byte eqe */
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                        __ilog2_u32(eq->len / 256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                        eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
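
/* AMAP_SET_BITS (a bit-field accessor macro from the driver's hardware
 * header) writes a named field into the context structure; e.g. a
 * 1024-entry EQ encodes its count field as __ilog2_u32(1024/256) = 2.
 * The whole context is then byte-swapped once by be_dws_cpu_to_le before
 * being handed to firmware.
 */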
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                u8 type, bool permanent, u32 if_handle)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mac_query *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

        req->type = type;
        if (permanent) {
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16) if_handle);
                req->permanent = 0;
        }

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                u32 if_id, u32 *pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_add *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_pmac_del *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

        req->if_id = cpu_to_le32(if_id);
        req->pmac_id = cpu_to_le32(pmac_id);

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
int be_cmd_cq_create(struct be_adapter *adapter,
                struct be_queue_info *cq, struct be_queue_info *eq,
                bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cq_create *req;
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_CQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
        AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
        AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
                        __ilog2_u32(cq->len / 256));
        AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
        AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
        AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len); /* log2(len) + 1 */
        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}
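
/* Example: a 2048-entry ring encodes as fls(2048) = 12. A ring whose fls
 * value is 16 (32K entries) wraps to the encoding 0, which the hardware
 * presumably treats as the maximum ring size.
 */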
int be_cmd_mccq_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
                be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_txq_create(struct be_adapter *adapter,
                        struct be_queue_info *txq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_tx_create *req;
        struct be_dma_mem *q_mem = &txq->dma_mem;
        void *ctxt;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;

        AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
                be_encoded_q_len(txq->len));
        AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
                        be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_rxq_create(struct be_adapter *adapter,
                struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
                u16 max_frame_size, u32 if_id, u32 rss)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_rx_create *req;
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
                sizeof(*req));

        req->cq_id = cpu_to_le16(cq_id);
        req->frag_size = fls(frag_size) - 1;
        req->num_pages = 2;
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
        req->interface_id = cpu_to_le32(if_id);
        req->max_frame_size = cpu_to_le16(max_frame_size);
        req->rss_queue = cpu_to_le32(rss);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
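
/* frag_size is carried as a log2: e.g. 2048-byte rx fragments are encoded as
 * fls(2048) - 1 = 11. This assumes frag_size is a power of two, which the
 * caller is expected to guarantee.
 */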
/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                int queue_type)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        u8 subsys = 0, opcode = 0;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();
        }
        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}
/* Create an rx filtering policy configuration on an i/f
 * Uses mbox
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
                bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_create *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

        req->capability_flags = cpu_to_le32(flags);
        req->enable_flags = cpu_to_le32(flags);
        req->pmac_invalid = pmac_invalid;
        if (!pmac_invalid)
                memcpy(req->mac_addr, mac, ETH_ALEN);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
                if (!pmac_invalid)
                        *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_destroy *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

        req->interface_id = cpu_to_le32(interface_id);

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_stats *req;
        struct be_sge *sge;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
        wrb->tag0 = OPCODE_ETH_GET_STATISTICS;

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_GET_STATISTICS, sizeof(*req));
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        be_mcc_notify(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return 0;
}
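
/* Embedded commands place the request in the WRB payload itself (see
 * be_wrb_hdr_prepare with embedded == true); a non-embedded command like the
 * one above instead fills one scatter-gather entry with the dma address and
 * length of an external buffer, and tags the WRB (tag0) so that
 * be_mcc_compl_process can recognize the asynchronous stats response when
 * its completion arrives.
 */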
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
                        bool *link_up)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        *link_up = false;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
                        *link_up = true;
        }

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fw_version *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
                strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
/* set the EQ delay interval of an EQ to specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_modify_eq_delay *req;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

        req->num_eq = cpu_to_le32(1);
        req->delay[0].eq_id = cpu_to_le32(eq_id);
        req->delay[0].phase = 0;
        req->delay[0].delay_multiplier = cpu_to_le32(eqd);

        be_mcc_notify(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return 0;
}
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                        u32 num, bool untagged, bool promiscuous)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        req->untagged = untagged;
        req->num_vlan = num;
        if (!promiscuous) {
                memcpy(req->normal_vlan, vtag_array,
                        req->num_vlan * sizeof(vtag_array[0]));
        }

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
/* Uses MCC for this command as it may be called in BH context
 * Uses synchronous mcc
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_promiscuous_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_PROMISCUOUS, sizeof(*req));

        if (port_num)
                req->port1_promiscuous = en;
        else
                req->port0_promiscuous = en;

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
/*
 * Uses MCC for this command as it may be called in BH context
 * (mc_list == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
                struct dev_mc_list *mc_list, u32 mc_count)
{
#define BE_MAX_MC       32 /* set mcast promisc if > 32 */
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcast_mac_config *req;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

        req->interface_id = if_id;
        if (mc_list && mc_count <= BE_MAX_MC) {
                int i;
                struct dev_mc_list *mc;

                req->num_mac = cpu_to_le16(mc_count);

                for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
                        memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
        } else {
                req->promiscuous = 1;
        }

        be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return 0;
}
/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
        req->rx_flow_control = cpu_to_le16((u16)rx_fc);

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_flow_control *resp =
                                                embedded_payload(wrb);
                *tx_fc = le16_to_cpu(resp->tx_flow_control);
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *cap)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
                *cap = le32_to_cpu(resp->function_cap);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_reset_function(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *req;
        int status;

        spin_lock(&adapter->mbox_lock);

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

        status = be_mbox_notify_wait(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                u32 flash_type, u32 flash_opcode, u32 buf_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_write_flashrom *req = cmd->va;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, cmd->size, false, 1);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd->size);

        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);

        status = be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}