/*
 * Copyright (C) 2005 - 2009 ServerEngines
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
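/* Ring the MCC doorbell: the low bits carry the MCC ring id, and the
 * count of newly posted WRBs is placed at DB_MCCQ_NUM_POSTED_SHIFT. */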
static void be_mcc_notify(struct be_adapter *adapter)
{
        struct be_queue_info *mccq = &adapter->mcc_obj.q;
        u32 val = 0;

        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
        iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
                return true;
        } else {
                return false;
        }
}
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
        compl->flags = 0;
}
static int be_mcc_compl_process(struct be_adapter *adapter,
        struct be_mcc_compl *compl)
{
        u16 compl_status, extd_status;

        /* Just swap the status to host endian; mcc tag is opaquely copied
         * from mcc_wrb */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                CQE_STATUS_COMPL_MASK;
        if (compl_status != MCC_STATUS_SUCCESS) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                CQE_STATUS_EXTD_MASK;
                dev_warn(&adapter->pdev->dev,
                        "Error in cmd completion: status(compl/extd)=%d/%d\n",
                        compl_status, extd_status);
                return -1;
        }
        return 0;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
                struct be_async_event_link_state *evt)
{
        be_link_status_update(adapter,
                evt->port_link_status == ASYNC_EVENT_LINK_UP);
}
static inline bool is_link_state_evt(u32 trailer)
{
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                ASYNC_TRAILER_EVENT_CODE_MASK) ==
                        ASYNC_EVENT_CODE_LINK_STATE);
}
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
        struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
                return compl;
        }
        return NULL;
}
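/* Drain the MCC CQ: async events (link state) are dispatched separately,
 * while ordinary command completions are processed and their WRB slots
 * reclaimed before the CQ is re-armed. */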
void be_process_mcc(struct be_adapter *adapter)
{
        struct be_mcc_compl *compl;
        int num = 0;

        spin_lock_bh(&adapter->mcc_cq_lock);
        while ((compl = be_mcc_compl_get(adapter))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        BUG_ON(!is_link_state_evt(compl->flags));

                        /* Interpret compl as an async link evt */
                        be_async_link_state_process(adapter,
                                (struct be_async_event_link_state *) compl);
                } else {
                        be_mcc_compl_process(adapter, compl);
                        atomic_dec(&adapter->mcc_obj.q.used);
                }
                be_mcc_compl_use(compl);
                num++;
        }
        if (num)
                be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
        spin_unlock_bh(&adapter->mcc_cq_lock);
}
/* Wait till no more pending mcc requests are present */
static void be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout             50000 /* 5s timeout */
        int i;

        for (i = 0; i < mcc_timeout; i++) {
                be_process_mcc(adapter);
                if (atomic_read(&adapter->mcc_obj.q.used) == 0)
                        break;
                udelay(100);
        }
        if (i == mcc_timeout)
                dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
}
/* Notify MCC requests and wait for completion */
static void be_mcc_notify_wait(struct be_adapter *adapter)
{
        be_mcc_notify(adapter);
        be_mcc_wait_compl(adapter);
}
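/* Poll the mailbox doorbell's ready bit. The wait between reads backs off
 * (roughly 5us at first, then 200us steps) and gives up after ~200ms. */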
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
        int cnt = 0, wait = 5;
        u32 ready;

        do {
                ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
                if (ready)
                        break;

                if (cnt > 200000) {
                        dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
                        return -1;
                }

                if (cnt > 50)
                        wait = 200;
                cnt += wait;
                udelay(wait);
        } while (true);

        return 0;
}
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify(struct be_adapter *adapter)
{
        int status;
        u32 val = 0;
        void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        memset(compl, 0, sizeof(*compl));

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
        iowrite32(val, db);

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        val = 0;
        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;
        iowrite32(val, db);

        status = be_mbox_db_ready_wait(adapter, db);
        if (status != 0)
                return status;

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(adapter, &mbox->compl);
                be_mcc_compl_use(compl);
                if (status)
                        return status;
        } else {
                dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
                return -1;
        }
        return 0;
}
static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
        u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

        *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
        if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
                return -1;
        else
                return 0;
}
int be_cmd_POST(struct be_adapter *adapter)
{
        u16 stage;
        int error;

        error = be_POST_stage_get(adapter, &stage);
        if (error || stage != POST_STAGE_ARMFW_RDY) {
                dev_err(&adapter->pdev->dev, "POST failed.\n");
                return -1;
        }

        return 0;
}
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
        return wrb->payload.embedded_payload;
}
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
        return &wrb->payload.sgl[0];
}
/* Don't touch the hdr after it's prepared */
static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                                bool embedded, u8 sge_cnt)
{
        if (embedded)
                wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        else
                wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        be_dws_cpu_to_le(wrb, 20);
}
/* Don't touch the hdr after it's prepared */
static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                                u8 subsystem, u8 opcode, int cmd_len)
{
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                        struct be_dma_mem *mem)
{
        int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        u64 dma = (u64)mem->dma;

        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
                dma += PAGE_SIZE_4K;
        }
}
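/* Worked example of the conversion below (integer arithmetic throughout):
 * usec_delay = 8 gives interrupt_rate = 1000000 / 8 = 125000/sec, so
 * multiplier = (651042 - 125000) * 10 / 125000 = 42, which rounds to
 * (42 + 5) / 10 = 4, well under the cap of 1023. */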
/* Converts interrupt delay in microseconds to multiplier value */
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE                   651042
        const u32 round = 10;
        u32 multiplier;

        if (usec_delay == 0)
                multiplier = 0;
        else {
                u32 interrupt_rate = 1000000 / usec_delay;
                /* Max delay, corresponding to the lowest interrupt rate */
                if (interrupt_rate == 0)
                        multiplier = 1023;
                else {
                        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
                        multiplier /= interrupt_rate;
                        /* Round the multiplier to the closest value.*/
                        multiplier = (multiplier + round / 2) / round;
                        multiplier = min(multiplier, (u32)1023);
                }
        }
        return multiplier;
}
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
        return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
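/* Returns NULL when the MCC queue is already full (used == len); callers
 * must check before touching the WRB. */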
static inline struct be_mcc_wrb *wrb_from_mcc(struct be_queue_info *mccq)
{
        struct be_mcc_wrb *wrb = NULL;

        if (atomic_read(&mccq->used) < mccq->len) {
                wrb = queue_head_node(mccq);
                queue_head_inc(mccq);
                atomic_inc(&mccq->used);
                memset(wrb, 0, sizeof(*wrb));
        }
        return wrb;
}
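/* Most be_cmd_* routines below are issued through the mailbox: each takes
 * adapter->mbox_lock, builds an embedded request in the mailbox WRB and
 * waits synchronously for the completion via be_mbox_notify(). Commands
 * that may run in BH context use the MCC queue instead (see below). */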
int be_cmd_eq_create(struct be_adapter *adapter,
                struct be_queue_info *eq, int eq_delay)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_eq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &eq->dma_mem;
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_eq_context, func, req->context,
                        be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        /* 4 byte eqe */
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                        __ilog2_u32(eq->len / 256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                        eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(adapter);
        if (!status) {
                eq->id = le16_to_cpu(resp->eq_id);
                eq->created = true;
        }
        spin_unlock(&adapter->mbox_lock);

        return status;
}
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
                        u8 type, bool permanent, u32 if_handle)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_mac_query *req = embedded_payload(wrb);
        struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

        req->type = type;
        if (permanent) {
                req->permanent = 1;
        } else {
                req->if_id = cpu_to_le16((u16)if_handle);
                req->permanent = 0;
        }

        status = be_mbox_notify(adapter);
        if (!status)
                memcpy(mac_addr, resp->mac.addr, ETH_ALEN);

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
                u32 if_id, u32 *pmac_id)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_pmac_add *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

        req->if_id = cpu_to_le32(if_id);
        memcpy(req->mac_address, mac_addr, ETH_ALEN);

        status = be_mbox_notify(adapter);
        if (!status) {
                struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
                *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_pmac_del *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

        req->if_id = cpu_to_le32(if_id);
        req->pmac_id = cpu_to_le32(pmac_id);

        status = be_mbox_notify(adapter);
        spin_unlock(&adapter->mbox_lock);

        return status;
}
int be_cmd_cq_create(struct be_adapter *adapter,
                struct be_queue_info *cq, struct be_queue_info *eq,
                bool sol_evts, bool no_delay, int coalesce_wm)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_cq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &cq->dma_mem;
        void *ctxt = &req->context;
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_CQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
        AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
        AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
                        __ilog2_u32(cq->len / 256));
        AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
        AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
        AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        AMAP_SET_BITS(struct amap_cq_context, func, ctxt, be_pci_func(adapter));
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(adapter);
        if (!status) {
                cq->id = le16_to_cpu(resp->cq_id);
                cq->created = true;
        }
        spin_unlock(&adapter->mbox_lock);

        return status;
}
static u32 be_encoded_q_len(int q_len)
{
        u32 len_encoded = fls(q_len); /* log2(len) + 1 */

        if (len_encoded == 16)
                len_encoded = 0;
        return len_encoded;
}
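/* For be_encoded_q_len(): a 256-entry ring encodes as fls(256) = 9, while
 * the maximum ring size (32768 entries, fls() == 16) wraps to 0. */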
int be_cmd_mccq_create(struct be_adapter *adapter,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_mcc_create *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        void *ctxt = &req->context;
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MCC_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt, be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
                be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(adapter);
        if (!status) {
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;
        }
        spin_unlock(&adapter->mbox_lock);

        return status;
}
int be_cmd_txq_create(struct be_adapter *adapter,
                        struct be_queue_info *txq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_eth_tx_create *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &txq->dma_mem;
        void *ctxt = &req->context;
        int status;
        u32 len_encoded;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;

        len_encoded = fls(txq->len); /* log2(len) + 1 */
        if (len_encoded == 16)
                len_encoded = 0;
        AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, len_encoded);
        AMAP_SET_BITS(struct amap_tx_context, pci_func_id, ctxt,
                        be_pci_func(adapter));
        AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }
        spin_unlock(&adapter->mbox_lock);

        return status;
}
int be_cmd_rxq_create(struct be_adapter *adapter,
                struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
                u16 max_frame_size, u32 if_id, u32 rss)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_eth_rx_create *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
                sizeof(*req));

        req->cq_id = cpu_to_le16(cq_id);
        req->frag_size = fls(frag_size) - 1;
        req->num_pages = 2;
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
        req->interface_id = cpu_to_le32(if_id);
        req->max_frame_size = cpu_to_le16(max_frame_size);
        req->rss_queue = cpu_to_le32(rss);

        status = be_mbox_notify(adapter);
        if (!status) {
                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
        }
        spin_unlock(&adapter->mbox_lock);

        return status;
}
/* Generic destroyer function for all types of queues */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                int queue_type)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
        u8 subsys = 0, opcode = 0;
        int status;

        spin_lock(&adapter->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();
        }
        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify(adapter);

        spin_unlock(&adapter->mbox_lock);

        return status;
}
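/* Typical use (a sketch; the queue types are those handled by the QTYPE_*
 * switch above):
 *      be_cmd_q_destroy(adapter, &adapter->mcc_obj.q, QTYPE_MCCQ);
 */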
/* Create an rx filtering policy configuration on an i/f */
int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac,
                bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_if_create *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

        req->capability_flags = cpu_to_le32(flags);
        req->enable_flags = cpu_to_le32(flags);
        req->pmac_invalid = pmac_invalid;
        if (!pmac_invalid)
                memcpy(req->mac_addr, mac, ETH_ALEN);

        status = be_mbox_notify(adapter);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
                if (!pmac_invalid)
                        *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_if_destroy *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));

        req->interface_id = cpu_to_le32(interface_id);
        status = be_mbox_notify(adapter);

        spin_unlock(&adapter->mbox_lock);

        return status;
}
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_get_stats *req = nonemb_cmd->va;
        struct be_sge *sge = nonembedded_sgl(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        memset(req, 0, sizeof(*req));

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_GET_STATISTICS, sizeof(*req));
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        status = be_mbox_notify(adapter);
        if (!status) {
                struct be_cmd_resp_get_stats *resp = nonemb_cmd->va;
                be_dws_le_to_cpu(&resp->hw_stats, sizeof(resp->hw_stats));
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_link_status_query(struct be_adapter *adapter, bool *link_up)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_link_status *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);

        *link_up = false;
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

        status = be_mbox_notify(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO)
                        *link_up = true;
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_get_fw_version *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));

        status = be_mbox_notify(adapter);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
                strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
/* set the EQ delay interval of an EQ to specified value */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_modify_eq_delay *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));

        req->num_eq = cpu_to_le32(1);
        req->delay[0].eq_id = cpu_to_le32(eq_id);
        req->delay[0].phase = 0;
        req->delay[0].delay_multiplier = cpu_to_le32(eqd);

        status = be_mbox_notify(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                        u32 num, bool untagged, bool promiscuous)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_vlan_config *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        req->untagged = untagged;
        req->num_vlan = num;
        if (!promiscuous) {
                memcpy(req->normal_vlan, vtag_array,
                        req->num_vlan * sizeof(vtag_array[0]));
        }

        status = be_mbox_notify(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}
/* Use MCC for this command as it may be called in BH context */
int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_promiscuous_config *req;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mcc(&adapter->mcc_obj.q);
        BUG_ON(!wrb);

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_PROMISCUOUS, sizeof(*req));

        if (port_num)
                req->port1_promiscuous = en;
        else
                req->port0_promiscuous = en;

        be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);
        return 0;
}
/*
 * Use MCC for this command as it may be called in BH context
 * (mc == NULL) => multicast promiscuous
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
                        struct dev_mc_list *mc_list, u32 mc_count)
{
#define BE_MAX_MC               32 /* set mcast promisc if > 32 */
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcast_mac_config *req;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mcc(&adapter->mcc_obj.q);
        BUG_ON(!wrb);

        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

        req->interface_id = if_id;
        if (mc_list && mc_count <= BE_MAX_MC) {
                int i;
                struct dev_mc_list *mc;

                req->num_mac = cpu_to_le16(mc_count);

                for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
                        memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
        } else {
                req->promiscuous = 1;
        }

        be_mcc_notify_wait(adapter);

        spin_unlock_bh(&adapter->mcc_lock);

        return 0;
}
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_set_flow_control *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));

        req->tx_flow_control = cpu_to_le16((u16)tx_fc);
        req->rx_flow_control = cpu_to_le16((u16)rx_fc);

        status = be_mbox_notify(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_get_flow_control *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

        status = be_mbox_notify(adapter);
        if (!status) {
                struct be_cmd_resp_get_flow_control *resp =
                                                embedded_payload(wrb);
                *tx_fc = le16_to_cpu(resp->tx_flow_control);
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_query_fw_cfg *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

        status = be_mbox_notify(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
        }

        spin_unlock(&adapter->mbox_lock);
        return status;
}
int be_cmd_reset_function(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_req_hdr *req = embedded_payload(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));

        status = be_mbox_notify(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}
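/* Like get_stats above, this is a non-embedded command: the flashrom
 * payload lives in a separate DMA buffer described by a single sge. */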
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
                        u32 flash_type, u32 flash_opcode, u32 buf_size)
{
        struct be_mcc_wrb *wrb = wrb_from_mbox(&adapter->mbox_mem);
        struct be_cmd_write_flashrom *req = cmd->va;
        struct be_sge *sge = nonembedded_sgl(wrb);
        int status;

        spin_lock(&adapter->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, cmd->size, false, 1);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
        sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
        sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(cmd->size);

        req->params.op_type = cpu_to_le32(flash_type);
        req->params.op_code = cpu_to_le32(flash_opcode);
        req->params.data_buf_size = cpu_to_le32(buf_size);

        status = be_mbox_notify(adapter);

        spin_unlock(&adapter->mbox_lock);
        return status;
}