/*
 * Copyright (C) 2005 - 2011 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Costa Mesa, CA 92626
 */
22 int beiscsi_pci_soft_reset(struct beiscsi_hba
*phba
)
25 u8
*pci_reset_offset
= 0;
26 u8
*pci_online0_offset
= 0;
27 u8
*pci_online1_offset
= 0;
32 pci_reset_offset
= (u8
*)phba
->pci_va
+ BE2_SOFT_RESET
;
33 pci_online0_offset
= (u8
*)phba
->pci_va
+ BE2_PCI_ONLINE0
;
34 pci_online1_offset
= (u8
*)phba
->pci_va
+ BE2_PCI_ONLINE1
;
35 sreset
= readl((void *)pci_reset_offset
);
36 sreset
|= BE2_SET_RESET
;
37 writel(sreset
, (void *)pci_reset_offset
);
40 while (sreset
& BE2_SET_RESET
) {
44 sreset
= readl((void *)pci_reset_offset
);
48 if (sreset
& BE2_SET_RESET
) {
49 printk(KERN_ERR
"Soft Reset did not deassert\n");
52 pconline1
= BE2_MPU_IRAM_ONLINE
;
53 writel(pconline0
, (void *)pci_online0_offset
);
54 writel(pconline1
, (void *)pci_online1_offset
);
56 sreset
= BE2_SET_RESET
;
57 writel(sreset
, (void *)pci_reset_offset
);
60 while (sreset
& BE2_SET_RESET
) {
64 sreset
= readl((void *)pci_reset_offset
);
67 if (sreset
& BE2_SET_RESET
) {
68 printk(KERN_ERR
"MPU Online Soft Reset did not deassert\n");
74 int be_chk_reset_complete(struct beiscsi_hba
*phba
)
76 unsigned int num_loop
;
81 mpu_sem
= (u8
*)phba
->csr_va
+ MPU_EP_SEMAPHORE
;
85 status
= readl((void *)mpu_sem
);
87 if ((status
& 0x80000000) || (status
& 0x0000FFFF) == 0xC000)
93 if ((status
& 0x80000000) || (!num_loop
)) {
94 printk(KERN_ERR
"Failed in be_chk_reset_complete"
95 "status = 0x%x\n", status
);
102 void be_mcc_notify(struct beiscsi_hba
*phba
)
104 struct be_queue_info
*mccq
= &phba
->ctrl
.mcc_obj
.q
;
107 val
|= mccq
->id
& DB_MCCQ_RING_ID_MASK
;
108 val
|= 1 << DB_MCCQ_NUM_POSTED_SHIFT
;
109 iowrite32(val
, phba
->db_va
+ DB_MCCQ_OFFSET
);
112 unsigned int alloc_mcc_tag(struct beiscsi_hba
*phba
)
114 unsigned int tag
= 0;
116 if (phba
->ctrl
.mcc_tag_available
) {
117 tag
= phba
->ctrl
.mcc_tag
[phba
->ctrl
.mcc_alloc_index
];
118 phba
->ctrl
.mcc_tag
[phba
->ctrl
.mcc_alloc_index
] = 0;
119 phba
->ctrl
.mcc_numtag
[tag
] = 0;
122 phba
->ctrl
.mcc_tag_available
--;
123 if (phba
->ctrl
.mcc_alloc_index
== (MAX_MCC_CMD
- 1))
124 phba
->ctrl
.mcc_alloc_index
= 0;
126 phba
->ctrl
.mcc_alloc_index
++;
131 void free_mcc_tag(struct be_ctrl_info
*ctrl
, unsigned int tag
)
133 spin_lock(&ctrl
->mbox_lock
);
134 tag
= tag
& 0x000000FF;
135 ctrl
->mcc_tag
[ctrl
->mcc_free_index
] = tag
;
136 if (ctrl
->mcc_free_index
== (MAX_MCC_CMD
- 1))
137 ctrl
->mcc_free_index
= 0;
139 ctrl
->mcc_free_index
++;
140 ctrl
->mcc_tag_available
++;
141 spin_unlock(&ctrl
->mbox_lock
);
144 bool is_link_state_evt(u32 trailer
)
146 return (((trailer
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
) &
147 ASYNC_TRAILER_EVENT_CODE_MASK
) ==
148 ASYNC_EVENT_CODE_LINK_STATE
);
151 static inline bool be_mcc_compl_is_new(struct be_mcc_compl
*compl)
153 if (compl->flags
!= 0) {
154 compl->flags
= le32_to_cpu(compl->flags
);
155 WARN_ON((compl->flags
& CQE_FLAGS_VALID_MASK
) == 0);
161 static inline void be_mcc_compl_use(struct be_mcc_compl
*compl)
166 static int be_mcc_compl_process(struct be_ctrl_info
*ctrl
,
167 struct be_mcc_compl
*compl)
169 u16 compl_status
, extd_status
;
171 be_dws_le_to_cpu(compl, 4);
173 compl_status
= (compl->status
>> CQE_STATUS_COMPL_SHIFT
) &
174 CQE_STATUS_COMPL_MASK
;
175 if (compl_status
!= MCC_STATUS_SUCCESS
) {
176 extd_status
= (compl->status
>> CQE_STATUS_EXTD_SHIFT
) &
177 CQE_STATUS_EXTD_MASK
;
178 dev_err(&ctrl
->pdev
->dev
,
179 "error in cmd completion: status(compl/extd)=%d/%d\n",
180 compl_status
, extd_status
);
186 int be_mcc_compl_process_isr(struct be_ctrl_info
*ctrl
,
187 struct be_mcc_compl
*compl)
189 u16 compl_status
, extd_status
;
192 be_dws_le_to_cpu(compl, 4);
194 compl_status
= (compl->status
>> CQE_STATUS_COMPL_SHIFT
) &
195 CQE_STATUS_COMPL_MASK
;
196 /* The ctrl.mcc_numtag[tag] is filled with
197 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
198 * [7:0] = compl_status
200 tag
= (compl->tag0
& 0x000000FF);
201 extd_status
= (compl->status
>> CQE_STATUS_EXTD_SHIFT
) &
202 CQE_STATUS_EXTD_MASK
;
204 ctrl
->mcc_numtag
[tag
] = 0x80000000;
205 ctrl
->mcc_numtag
[tag
] |= (compl->tag0
& 0x00FF0000);
206 ctrl
->mcc_numtag
[tag
] |= (extd_status
& 0x000000FF) << 8;
207 ctrl
->mcc_numtag
[tag
] |= (compl_status
& 0x000000FF);
208 wake_up_interruptible(&ctrl
->mcc_wait
[tag
]);
212 static struct be_mcc_compl
*be_mcc_compl_get(struct beiscsi_hba
*phba
)
214 struct be_queue_info
*mcc_cq
= &phba
->ctrl
.mcc_obj
.cq
;
215 struct be_mcc_compl
*compl = queue_tail_node(mcc_cq
);
217 if (be_mcc_compl_is_new(compl)) {
218 queue_tail_inc(mcc_cq
);
224 static void be2iscsi_fail_session(struct iscsi_cls_session
*cls_session
)
226 iscsi_session_failure(cls_session
->dd_data
, ISCSI_ERR_CONN_FAILED
);
229 void beiscsi_async_link_state_process(struct beiscsi_hba
*phba
,
230 struct be_async_event_link_state
*evt
)
232 switch (evt
->port_link_status
) {
233 case ASYNC_EVENT_LINK_DOWN
:
234 SE_DEBUG(DBG_LVL_1
, "Link Down on Physical Port %d\n",
236 phba
->state
|= BE_ADAPTER_LINK_DOWN
;
237 iscsi_host_for_each_session(phba
->shost
,
238 be2iscsi_fail_session
);
240 case ASYNC_EVENT_LINK_UP
:
241 phba
->state
= BE_ADAPTER_UP
;
242 SE_DEBUG(DBG_LVL_1
, "Link UP on Physical Port %d\n",
246 SE_DEBUG(DBG_LVL_1
, "Unexpected Async Notification %d on"
247 "Physical Port %d\n",
248 evt
->port_link_status
,
253 static void beiscsi_cq_notify(struct beiscsi_hba
*phba
, u16 qid
, bool arm
,
257 val
|= qid
& DB_CQ_RING_ID_MASK
;
259 val
|= 1 << DB_CQ_REARM_SHIFT
;
260 val
|= num_popped
<< DB_CQ_NUM_POPPED_SHIFT
;
261 iowrite32(val
, phba
->db_va
+ DB_CQ_OFFSET
);
265 int beiscsi_process_mcc(struct beiscsi_hba
*phba
)
267 struct be_mcc_compl
*compl;
268 int num
= 0, status
= 0;
269 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
271 spin_lock_bh(&phba
->ctrl
.mcc_cq_lock
);
272 while ((compl = be_mcc_compl_get(phba
))) {
273 if (compl->flags
& CQE_FLAGS_ASYNC_MASK
) {
274 /* Interpret flags as an async trailer */
275 if (is_link_state_evt(compl->flags
))
276 /* Interpret compl as a async link evt */
277 beiscsi_async_link_state_process(phba
,
278 (struct be_async_event_link_state
*) compl);
281 " Unsupported Async Event, flags"
282 " = 0x%08x\n", compl->flags
);
284 } else if (compl->flags
& CQE_FLAGS_COMPLETED_MASK
) {
285 status
= be_mcc_compl_process(ctrl
, compl);
286 atomic_dec(&phba
->ctrl
.mcc_obj
.q
.used
);
288 be_mcc_compl_use(compl);
293 beiscsi_cq_notify(phba
, phba
->ctrl
.mcc_obj
.cq
.id
, true, num
);
295 spin_unlock_bh(&phba
->ctrl
.mcc_cq_lock
);
299 /* Wait till no more pending mcc requests are present */
300 static int be_mcc_wait_compl(struct beiscsi_hba
*phba
)
303 for (i
= 0; i
< mcc_timeout
; i
++) {
304 status
= beiscsi_process_mcc(phba
);
308 if (atomic_read(&phba
->ctrl
.mcc_obj
.q
.used
) == 0)
312 if (i
== mcc_timeout
) {
313 dev_err(&phba
->pcidev
->dev
, "mccq poll timed out\n");
/* Notify MCC requests and wait for completion */
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
	/* ring the doorbell, then poll until the queue drains */
	be_mcc_notify(phba);
	return be_mcc_wait_compl(phba);
}
326 static int be_mbox_db_ready_wait(struct be_ctrl_info
*ctrl
)
328 #define long_delay 2000
329 void __iomem
*db
= ctrl
->db
+ MPU_MAILBOX_DB_OFFSET
;
330 int cnt
= 0, wait
= 5; /* in usecs */
334 ready
= ioread32(db
) & MPU_MAILBOX_DB_RDY_MASK
;
338 if (cnt
> 12000000) {
339 dev_err(&ctrl
->pdev
->dev
, "mbox_db poll timed out\n");
345 mdelay(long_delay
/ 1000);
353 int be_mbox_notify(struct be_ctrl_info
*ctrl
)
357 void __iomem
*db
= ctrl
->db
+ MPU_MAILBOX_DB_OFFSET
;
358 struct be_dma_mem
*mbox_mem
= &ctrl
->mbox_mem
;
359 struct be_mcc_mailbox
*mbox
= mbox_mem
->va
;
360 struct be_mcc_compl
*compl = &mbox
->compl;
362 val
&= ~MPU_MAILBOX_DB_RDY_MASK
;
363 val
|= MPU_MAILBOX_DB_HI_MASK
;
364 val
|= (upper_32_bits(mbox_mem
->dma
) >> 2) << 2;
367 status
= be_mbox_db_ready_wait(ctrl
);
369 SE_DEBUG(DBG_LVL_1
, " be_mbox_db_ready_wait failed\n");
373 val
&= ~MPU_MAILBOX_DB_RDY_MASK
;
374 val
&= ~MPU_MAILBOX_DB_HI_MASK
;
375 val
|= (u32
) (mbox_mem
->dma
>> 4) << 2;
378 status
= be_mbox_db_ready_wait(ctrl
);
380 SE_DEBUG(DBG_LVL_1
, " be_mbox_db_ready_wait failed\n");
383 if (be_mcc_compl_is_new(compl)) {
384 status
= be_mcc_compl_process(ctrl
, &mbox
->compl);
385 be_mcc_compl_use(compl);
387 SE_DEBUG(DBG_LVL_1
, "After be_mcc_compl_process\n");
391 dev_err(&ctrl
->pdev
->dev
, "invalid mailbox completion\n");
398 * Insert the mailbox address into the doorbell in two steps
399 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
401 static int be_mbox_notify_wait(struct beiscsi_hba
*phba
)
405 void __iomem
*db
= phba
->ctrl
.db
+ MPU_MAILBOX_DB_OFFSET
;
406 struct be_dma_mem
*mbox_mem
= &phba
->ctrl
.mbox_mem
;
407 struct be_mcc_mailbox
*mbox
= mbox_mem
->va
;
408 struct be_mcc_compl
*compl = &mbox
->compl;
409 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
411 val
|= MPU_MAILBOX_DB_HI_MASK
;
412 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
413 val
|= (upper_32_bits(mbox_mem
->dma
) >> 2) << 2;
416 /* wait for ready to be set */
417 status
= be_mbox_db_ready_wait(ctrl
);
422 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
423 val
|= (u32
)(mbox_mem
->dma
>> 4) << 2;
426 status
= be_mbox_db_ready_wait(ctrl
);
430 /* A cq entry has been made now */
431 if (be_mcc_compl_is_new(compl)) {
432 status
= be_mcc_compl_process(ctrl
, &mbox
->compl);
433 be_mcc_compl_use(compl);
437 dev_err(&phba
->pcidev
->dev
, "invalid mailbox completion\n");
443 void be_wrb_hdr_prepare(struct be_mcc_wrb
*wrb
, int payload_len
,
444 bool embedded
, u8 sge_cnt
)
447 wrb
->embedded
|= MCC_WRB_EMBEDDED_MASK
;
449 wrb
->embedded
|= (sge_cnt
& MCC_WRB_SGE_CNT_MASK
) <<
450 MCC_WRB_SGE_CNT_SHIFT
;
451 wrb
->payload_length
= payload_len
;
452 be_dws_cpu_to_le(wrb
, 8);
455 void be_cmd_hdr_prepare(struct be_cmd_req_hdr
*req_hdr
,
456 u8 subsystem
, u8 opcode
, int cmd_len
)
458 req_hdr
->opcode
= opcode
;
459 req_hdr
->subsystem
= subsystem
;
460 req_hdr
->request_length
= cpu_to_le32(cmd_len
- sizeof(*req_hdr
));
461 req_hdr
->timeout
= 120;
464 static void be_cmd_page_addrs_prepare(struct phys_addr
*pages
, u32 max_pages
,
465 struct be_dma_mem
*mem
)
468 u64 dma
= (u64
) mem
->dma
;
470 buf_pages
= min(PAGES_4K_SPANNED(mem
->va
, mem
->size
), max_pages
);
471 for (i
= 0; i
< buf_pages
; i
++) {
472 pages
[i
].lo
= cpu_to_le32(dma
& 0xFFFFFFFF);
473 pages
[i
].hi
= cpu_to_le32(upper_32_bits(dma
));
478 static u32
eq_delay_to_mult(u32 usec_delay
)
480 #define MAX_INTR_RATE 651042
481 const u32 round
= 10;
487 u32 interrupt_rate
= 1000000 / usec_delay
;
488 if (interrupt_rate
== 0)
491 multiplier
= (MAX_INTR_RATE
- interrupt_rate
) * round
;
492 multiplier
/= interrupt_rate
;
493 multiplier
= (multiplier
+ round
/ 2) / round
;
494 multiplier
= min(multiplier
, (u32
) 1023);
500 struct be_mcc_wrb
*wrb_from_mbox(struct be_dma_mem
*mbox_mem
)
502 return &((struct be_mcc_mailbox
*)(mbox_mem
->va
))->wrb
;
505 struct be_mcc_wrb
*wrb_from_mccq(struct beiscsi_hba
*phba
)
507 struct be_queue_info
*mccq
= &phba
->ctrl
.mcc_obj
.q
;
508 struct be_mcc_wrb
*wrb
;
510 BUG_ON(atomic_read(&mccq
->used
) >= mccq
->len
);
511 wrb
= queue_head_node(mccq
);
512 memset(wrb
, 0, sizeof(*wrb
));
513 wrb
->tag0
= (mccq
->head
& 0x000000FF) << 16;
514 queue_head_inc(mccq
);
515 atomic_inc(&mccq
->used
);
520 int beiscsi_cmd_eq_create(struct be_ctrl_info
*ctrl
,
521 struct be_queue_info
*eq
, int eq_delay
)
523 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
524 struct be_cmd_req_eq_create
*req
= embedded_payload(wrb
);
525 struct be_cmd_resp_eq_create
*resp
= embedded_payload(wrb
);
526 struct be_dma_mem
*q_mem
= &eq
->dma_mem
;
529 SE_DEBUG(DBG_LVL_8
, "In beiscsi_cmd_eq_create\n");
530 spin_lock(&ctrl
->mbox_lock
);
531 memset(wrb
, 0, sizeof(*wrb
));
533 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
535 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
536 OPCODE_COMMON_EQ_CREATE
, sizeof(*req
));
538 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
540 AMAP_SET_BITS(struct amap_eq_context
, func
, req
->context
,
541 PCI_FUNC(ctrl
->pdev
->devfn
));
542 AMAP_SET_BITS(struct amap_eq_context
, valid
, req
->context
, 1);
543 AMAP_SET_BITS(struct amap_eq_context
, size
, req
->context
, 0);
544 AMAP_SET_BITS(struct amap_eq_context
, count
, req
->context
,
545 __ilog2_u32(eq
->len
/ 256));
546 AMAP_SET_BITS(struct amap_eq_context
, delaymult
, req
->context
,
547 eq_delay_to_mult(eq_delay
));
548 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
550 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
552 status
= be_mbox_notify(ctrl
);
554 eq
->id
= le16_to_cpu(resp
->eq_id
);
557 spin_unlock(&ctrl
->mbox_lock
);
561 int be_cmd_fw_initialize(struct be_ctrl_info
*ctrl
)
563 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
567 SE_DEBUG(DBG_LVL_8
, "In be_cmd_fw_initialize\n");
568 spin_lock(&ctrl
->mbox_lock
);
569 memset(wrb
, 0, sizeof(*wrb
));
571 endian_check
= (u8
*) wrb
;
572 *endian_check
++ = 0xFF;
573 *endian_check
++ = 0x12;
574 *endian_check
++ = 0x34;
575 *endian_check
++ = 0xFF;
576 *endian_check
++ = 0xFF;
577 *endian_check
++ = 0x56;
578 *endian_check
++ = 0x78;
579 *endian_check
++ = 0xFF;
580 be_dws_cpu_to_le(wrb
, sizeof(*wrb
));
582 status
= be_mbox_notify(ctrl
);
584 SE_DEBUG(DBG_LVL_1
, "be_cmd_fw_initialize Failed\n");
586 spin_unlock(&ctrl
->mbox_lock
);
590 int beiscsi_cmd_cq_create(struct be_ctrl_info
*ctrl
,
591 struct be_queue_info
*cq
, struct be_queue_info
*eq
,
592 bool sol_evts
, bool no_delay
, int coalesce_wm
)
594 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
595 struct be_cmd_req_cq_create
*req
= embedded_payload(wrb
);
596 struct be_cmd_resp_cq_create
*resp
= embedded_payload(wrb
);
597 struct be_dma_mem
*q_mem
= &cq
->dma_mem
;
598 void *ctxt
= &req
->context
;
601 SE_DEBUG(DBG_LVL_8
, "In beiscsi_cmd_cq_create\n");
602 spin_lock(&ctrl
->mbox_lock
);
603 memset(wrb
, 0, sizeof(*wrb
));
605 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
607 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
608 OPCODE_COMMON_CQ_CREATE
, sizeof(*req
));
610 SE_DEBUG(DBG_LVL_1
, "uninitialized q_mem->va\n");
612 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
614 AMAP_SET_BITS(struct amap_cq_context
, coalescwm
, ctxt
, coalesce_wm
);
615 AMAP_SET_BITS(struct amap_cq_context
, nodelay
, ctxt
, no_delay
);
616 AMAP_SET_BITS(struct amap_cq_context
, count
, ctxt
,
617 __ilog2_u32(cq
->len
/ 256));
618 AMAP_SET_BITS(struct amap_cq_context
, valid
, ctxt
, 1);
619 AMAP_SET_BITS(struct amap_cq_context
, solevent
, ctxt
, sol_evts
);
620 AMAP_SET_BITS(struct amap_cq_context
, eventable
, ctxt
, 1);
621 AMAP_SET_BITS(struct amap_cq_context
, eqid
, ctxt
, eq
->id
);
622 AMAP_SET_BITS(struct amap_cq_context
, armed
, ctxt
, 1);
623 AMAP_SET_BITS(struct amap_cq_context
, func
, ctxt
,
624 PCI_FUNC(ctrl
->pdev
->devfn
));
625 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
627 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
629 status
= be_mbox_notify(ctrl
);
631 cq
->id
= le16_to_cpu(resp
->cq_id
);
634 SE_DEBUG(DBG_LVL_1
, "In be_cmd_cq_create, status=ox%08x\n",
636 spin_unlock(&ctrl
->mbox_lock
);
641 static u32
be_encoded_q_len(int q_len
)
643 u32 len_encoded
= fls(q_len
); /* log2(len) + 1 */
644 if (len_encoded
== 16)
649 int beiscsi_cmd_mccq_create(struct beiscsi_hba
*phba
,
650 struct be_queue_info
*mccq
,
651 struct be_queue_info
*cq
)
653 struct be_mcc_wrb
*wrb
;
654 struct be_cmd_req_mcc_create
*req
;
655 struct be_dma_mem
*q_mem
= &mccq
->dma_mem
;
656 struct be_ctrl_info
*ctrl
;
660 spin_lock(&phba
->ctrl
.mbox_lock
);
662 wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
663 memset(wrb
, 0, sizeof(*wrb
));
664 req
= embedded_payload(wrb
);
665 ctxt
= &req
->context
;
667 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
669 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
670 OPCODE_COMMON_MCC_CREATE
, sizeof(*req
));
672 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
674 AMAP_SET_BITS(struct amap_mcc_context
, fid
, ctxt
,
675 PCI_FUNC(phba
->pcidev
->devfn
));
676 AMAP_SET_BITS(struct amap_mcc_context
, valid
, ctxt
, 1);
677 AMAP_SET_BITS(struct amap_mcc_context
, ring_size
, ctxt
,
678 be_encoded_q_len(mccq
->len
));
679 AMAP_SET_BITS(struct amap_mcc_context
, cq_id
, ctxt
, cq
->id
);
681 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
683 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
685 status
= be_mbox_notify_wait(phba
);
687 struct be_cmd_resp_mcc_create
*resp
= embedded_payload(wrb
);
688 mccq
->id
= le16_to_cpu(resp
->id
);
689 mccq
->created
= true;
691 spin_unlock(&phba
->ctrl
.mbox_lock
);
696 int beiscsi_cmd_q_destroy(struct be_ctrl_info
*ctrl
, struct be_queue_info
*q
,
699 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
700 struct be_cmd_req_q_destroy
*req
= embedded_payload(wrb
);
701 u8 subsys
= 0, opcode
= 0;
704 SE_DEBUG(DBG_LVL_8
, "In beiscsi_cmd_q_destroy\n");
705 spin_lock(&ctrl
->mbox_lock
);
706 memset(wrb
, 0, sizeof(*wrb
));
707 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
709 switch (queue_type
) {
711 subsys
= CMD_SUBSYSTEM_COMMON
;
712 opcode
= OPCODE_COMMON_EQ_DESTROY
;
715 subsys
= CMD_SUBSYSTEM_COMMON
;
716 opcode
= OPCODE_COMMON_CQ_DESTROY
;
719 subsys
= CMD_SUBSYSTEM_COMMON
;
720 opcode
= OPCODE_COMMON_MCC_DESTROY
;
723 subsys
= CMD_SUBSYSTEM_ISCSI
;
724 opcode
= OPCODE_COMMON_ISCSI_WRBQ_DESTROY
;
727 subsys
= CMD_SUBSYSTEM_ISCSI
;
728 opcode
= OPCODE_COMMON_ISCSI_DEFQ_DESTROY
;
731 subsys
= CMD_SUBSYSTEM_ISCSI
;
732 opcode
= OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES
;
735 spin_unlock(&ctrl
->mbox_lock
);
739 be_cmd_hdr_prepare(&req
->hdr
, subsys
, opcode
, sizeof(*req
));
740 if (queue_type
!= QTYPE_SGL
)
741 req
->id
= cpu_to_le16(q
->id
);
743 status
= be_mbox_notify(ctrl
);
745 spin_unlock(&ctrl
->mbox_lock
);
749 int be_cmd_create_default_pdu_queue(struct be_ctrl_info
*ctrl
,
750 struct be_queue_info
*cq
,
751 struct be_queue_info
*dq
, int length
,
754 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
755 struct be_defq_create_req
*req
= embedded_payload(wrb
);
756 struct be_dma_mem
*q_mem
= &dq
->dma_mem
;
757 void *ctxt
= &req
->context
;
760 SE_DEBUG(DBG_LVL_8
, "In be_cmd_create_default_pdu_queue\n");
761 spin_lock(&ctrl
->mbox_lock
);
762 memset(wrb
, 0, sizeof(*wrb
));
764 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
766 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ISCSI
,
767 OPCODE_COMMON_ISCSI_DEFQ_CREATE
, sizeof(*req
));
769 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
770 AMAP_SET_BITS(struct amap_be_default_pdu_context
, rx_pdid
, ctxt
, 0);
771 AMAP_SET_BITS(struct amap_be_default_pdu_context
, rx_pdid_valid
, ctxt
,
773 AMAP_SET_BITS(struct amap_be_default_pdu_context
, pci_func_id
, ctxt
,
774 PCI_FUNC(ctrl
->pdev
->devfn
));
775 AMAP_SET_BITS(struct amap_be_default_pdu_context
, ring_size
, ctxt
,
776 be_encoded_q_len(length
/ sizeof(struct phys_addr
)));
777 AMAP_SET_BITS(struct amap_be_default_pdu_context
, default_buffer_size
,
779 AMAP_SET_BITS(struct amap_be_default_pdu_context
, cq_id_recv
, ctxt
,
782 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
784 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
786 status
= be_mbox_notify(ctrl
);
788 struct be_defq_create_resp
*resp
= embedded_payload(wrb
);
790 dq
->id
= le16_to_cpu(resp
->id
);
793 spin_unlock(&ctrl
->mbox_lock
);
798 int be_cmd_wrbq_create(struct be_ctrl_info
*ctrl
, struct be_dma_mem
*q_mem
,
799 struct be_queue_info
*wrbq
)
801 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
802 struct be_wrbq_create_req
*req
= embedded_payload(wrb
);
803 struct be_wrbq_create_resp
*resp
= embedded_payload(wrb
);
806 spin_lock(&ctrl
->mbox_lock
);
807 memset(wrb
, 0, sizeof(*wrb
));
809 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
811 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ISCSI
,
812 OPCODE_COMMON_ISCSI_WRBQ_CREATE
, sizeof(*req
));
813 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
814 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
816 status
= be_mbox_notify(ctrl
);
818 wrbq
->id
= le16_to_cpu(resp
->cid
);
819 wrbq
->created
= true;
821 spin_unlock(&ctrl
->mbox_lock
);
825 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info
*ctrl
,
826 struct be_dma_mem
*q_mem
,
827 u32 page_offset
, u32 num_pages
)
829 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
830 struct be_post_sgl_pages_req
*req
= embedded_payload(wrb
);
832 unsigned int curr_pages
;
833 u32 internal_page_offset
= 0;
834 u32 temp_num_pages
= num_pages
;
836 if (num_pages
== 0xff)
839 spin_lock(&ctrl
->mbox_lock
);
841 memset(wrb
, 0, sizeof(*wrb
));
842 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
843 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ISCSI
,
844 OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES
,
846 curr_pages
= BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req
,
848 req
->num_pages
= min(num_pages
, curr_pages
);
849 req
->page_offset
= page_offset
;
850 be_cmd_page_addrs_prepare(req
->pages
, req
->num_pages
, q_mem
);
851 q_mem
->dma
= q_mem
->dma
+ (req
->num_pages
* PAGE_SIZE
);
852 internal_page_offset
+= req
->num_pages
;
853 page_offset
+= req
->num_pages
;
854 num_pages
-= req
->num_pages
;
856 if (temp_num_pages
== 0xff)
857 req
->num_pages
= temp_num_pages
;
859 status
= be_mbox_notify(ctrl
);
862 "FW CMD to map iscsi frags failed.\n");
865 } while (num_pages
> 0);
867 spin_unlock(&ctrl
->mbox_lock
);
869 beiscsi_cmd_q_destroy(ctrl
, NULL
, QTYPE_SGL
);
873 int beiscsi_cmd_reset_function(struct beiscsi_hba
*phba
)
875 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
876 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
877 struct be_post_sgl_pages_req
*req
= embedded_payload(wrb
);
880 spin_lock(&ctrl
->mbox_lock
);
882 req
= embedded_payload(wrb
);
883 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
884 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
885 OPCODE_COMMON_FUNCTION_RESET
, sizeof(*req
));
886 status
= be_mbox_notify_wait(phba
);
888 spin_unlock(&ctrl
->mbox_lock
);