/*
 * Copyright (C) 2005 - 2015 Emulex
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@avagotech.com
 *
 * Costa Mesa, CA 92626
 */

#include <scsi/iscsi_proto.h>
int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
{
	u32 sreset;
	u32 pconline0 = 0;
	u32 pconline1 = 0;
	u8 *pci_reset_offset = 0;
	u8 *pci_online0_offset = 0;
	u8 *pci_online1_offset = 0;

	pci_reset_offset = (u8 *)phba->pci_va + BE2_SOFT_RESET;
	pci_online0_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE0;
	pci_online1_offset = (u8 *)phba->pci_va + BE2_PCI_ONLINE1;
	sreset = readl((void *)pci_reset_offset);
	sreset |= BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	while (sreset & BE2_SET_RESET) {
		sreset = readl((void *)pci_reset_offset);
	}

	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR DRV_NAME " Soft Reset did not deassert\n");
		return -EIO;
	}

	pconline1 = BE2_MPU_IRAM_ONLINE;
	writel(pconline0, (void *)pci_online0_offset);
	writel(pconline1, (void *)pci_online1_offset);

	sreset |= BE2_SET_RESET;
	writel(sreset, (void *)pci_reset_offset);

	while (sreset & BE2_SET_RESET) {
		sreset = readl((void *)pci_reset_offset);
	}

	if (sreset & BE2_SET_RESET) {
		printk(KERN_ERR DRV_NAME
		       " MPU Online Soft Reset did not deassert\n");
		return -EIO;
	}
	return 0;
}
int be_chk_reset_complete(struct beiscsi_hba *phba)
{
	unsigned int num_loop;
	u8 *mpu_sem = 0;
	u32 status;

	num_loop = 1000;
	mpu_sem = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;

	while (num_loop) {
		status = readl((void *)mpu_sem);

		if ((status & 0x80000000) || (status & 0x0000FFFF) == 0xC000)
			break;
		num_loop--;
	}

	if ((status & 0x80000000) || (!num_loop)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : Failed in be_chk_reset_complete "
			    "status = 0x%x\n", status);
		return -EIO;
	}

	return 0;
}
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
	unsigned int tag = 0;

	spin_lock(&phba->ctrl.mcc_lock);
	if (phba->ctrl.mcc_tag_available) {
		tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
		phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
		phba->ctrl.mcc_tag_status[tag] = 0;
		phba->ctrl.ptag_state[tag].tag_state = 0;
	}
	if (tag) {
		phba->ctrl.mcc_tag_available--;
		if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
			phba->ctrl.mcc_alloc_index = 0;
		else
			phba->ctrl.mcc_alloc_index++;
	}
	spin_unlock(&phba->ctrl.mcc_lock);
	return tag;
}
struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
				 unsigned int *ref_tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb = NULL;
	unsigned int tag;

	spin_lock_bh(&phba->ctrl.mcc_lock);
	if (mccq->used == mccq->len) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
			    mccq->used, phba->ctrl.mcc_tag_available);
		goto alloc_failed;
	}

	if (!phba->ctrl.mcc_tag_available)
		goto alloc_failed;

	tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
	if (!tag) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
			    phba->ctrl.mcc_tag_available,
			    phba->ctrl.mcc_alloc_index);
		goto alloc_failed;
	}

	/* return this tag for further reference */
	*ref_tag = tag;
	phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
	phba->ctrl.mcc_tag_status[tag] = 0;
	phba->ctrl.ptag_state[tag].tag_state = 0;
	phba->ctrl.mcc_tag_available--;
	if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
		phba->ctrl.mcc_alloc_index = 0;
	else
		phba->ctrl.mcc_alloc_index++;

	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = tag;
	wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
	queue_head_inc(mccq);
	mccq->used++;

alloc_failed:
	spin_unlock_bh(&phba->ctrl.mcc_lock);
	return wrb;
}
void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
{
	struct be_queue_info *mccq = &ctrl->mcc_obj.q;

	spin_lock_bh(&ctrl->mcc_lock);
	tag = tag & MCC_Q_CMD_TAG_MASK;
	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
		ctrl->mcc_free_index = 0;
	else
		ctrl->mcc_free_index++;
	ctrl->mcc_tag_available++;
	mccq->used--;
	spin_unlock_bh(&ctrl->mcc_lock);
}
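/*
 * Illustrative sketch (not part of the driver): alloc_mcc_tag()/free_mcc_wrb()
 * above manage the MCC tags as a fixed-size circular pool -- allocation pulls
 * from mcc_alloc_index, freeing pushes at mcc_free_index, and both indices
 * wrap at MAX_MCC_CMD. The standalone, lock-free model below uses made-up
 * ex_* names purely to show that wrap-around bookkeeping in isolation.
 */
#if 0	/* example only, never compiled into the driver */
#define EX_MAX_CMD	8	/* stands in for MAX_MCC_CMD */

static unsigned int ex_tags[EX_MAX_CMD];	/* 0 means "slot empty" */
static unsigned int ex_alloc_idx, ex_free_idx, ex_avail;

static void ex_pool_init(void)
{
	unsigned int i;

	for (i = 0; i < EX_MAX_CMD; i++)
		ex_tags[i] = i + 1;	/* valid tags are 1..EX_MAX_CMD */
	ex_alloc_idx = ex_free_idx = 0;
	ex_avail = EX_MAX_CMD;
}

static unsigned int ex_alloc_tag(void)
{
	unsigned int tag = 0;

	if (ex_avail) {
		tag = ex_tags[ex_alloc_idx];
		ex_tags[ex_alloc_idx] = 0;
		ex_avail--;
		if (ex_alloc_idx == (EX_MAX_CMD - 1))
			ex_alloc_idx = 0;
		else
			ex_alloc_idx++;
	}
	return tag;	/* 0 on exhaustion, like alloc_mcc_tag() */
}

static void ex_free_tag(unsigned int tag)
{
	ex_tags[ex_free_idx] = tag;
	if (ex_free_idx == (EX_MAX_CMD - 1))
		ex_free_idx = 0;
	else
		ex_free_idx++;
	ex_avail++;
}
#endif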
/**
 * beiscsi_fail_session(): Closing session with appropriate error
 * @cls_session: ptr to session
 **/
void beiscsi_fail_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}
/**
 * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Waits for MBX completion with the passed TAG.
 **/
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
			    uint32_t tag, struct be_mcc_wrb **wrb,
			    struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;
	uint32_t mcc_tag_status;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_mcc_wrb *temp_wrb;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;

	if (beiscsi_error(phba))
		return -EIO;

	/* wait for the mccq completion */
	rc = wait_event_interruptible_timeout(
				phba->ctrl.mcc_wait[tag],
				phba->ctrl.mcc_tag_status[tag],
				msecs_to_jiffies(
					BEISCSI_HOST_MBX_TIMEOUT));
	/*
	 * If MBOX cmd timeout expired, tag and resource allocated
	 * for cmd is not freed until FW returns completion.
	 */
	if (rc <= 0) {
		struct be_dma_mem *tag_mem;

		/*
		 * PCI/DMA memory allocated and posted in non-embedded mode
		 * will have mbx_cmd_mem != NULL.
		 * Save virtual and bus addresses for the command so that it
		 * can be freed later.
		 */
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else {
			tag_mem->size = 0;
		}

		/* first make tag_mem_state visible to all */
		wmb();
		set_bit(MCC_TAG_STATE_TIMEOUT,
			&phba->ctrl.ptag_state[tag].tag_state);

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		return -EBUSY;
	}

	rc = 0;
	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
	status = (mcc_tag_status & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
			CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
			  CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for "
			    "Subsys : %d Opcode : %d with "
			    "Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem, mbx_hdr->opcode,
			    status, addl_status);
		rc = -EIO;
		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficient Buffer Error "
				    "Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);
			rc = -EAGAIN;
		}
	}

	free_mcc_wrb(&phba->ctrl, tag);
	return rc;
}
/**
 * beiscsi_process_mbox_compl()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when BMBX method used
 **/
static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
				      struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	struct be_cmd_resp_hdr *resp_hdr;

	/*
	 * To check if valid bit is set, check the entire word as we don't know
	 * the endianness of the data (old entry is host endian while a new
	 * entry is little endian)
	 */
	if (!compl->flags) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : BMBX busy, no completion\n");
		return -EBUSY;
	}
	compl->flags = le32_to_cpu(compl->flags);
	WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);

	/*
	 * Just swap the status to host endian;
	 * mcc tag is opaquely copied from mcc_wrb.
	 */
	be_dws_le_to_cpu(compl, 4);
	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* Need to reset the entire word that houses the valid bit */
	compl->flags = 0;

	if (compl_status == MCC_STATUS_SUCCESS)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
		    hdr->subsystem, hdr->opcode, compl_status, extd_status);

	if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) {
		/* if status is insufficient buffer, check the length */
		resp_hdr = (struct be_cmd_resp_hdr *) hdr;
		if (resp_hdr->response_length)
			return 0;
	}
	return -EINVAL;
}
static void beiscsi_process_async_link(struct beiscsi_hba *phba,
				       struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt;

	evt = (struct be_async_event_link_state *)compl;

	phba->port_speed = evt->port_speed;
	/*
	 * Check logical link status in ASYNC event.
	 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
	 */
	if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
		phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
		phba->get_boot = BE_GET_BOOT_RETRIES;
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Up on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
	} else {
		phba->state = BE_ADAPTER_LINK_DOWN;
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Down on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
		iscsi_host_for_each_session(phba->shost,
					    beiscsi_fail_session);
	}
}
static char *beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};
static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
				      struct be_mcc_compl *compl)
{
	struct be_async_event_sli *async_sli;
	u8 evt_type, state, old_state, le;
	char *sev = KERN_WARNING;
	char *msg = NULL;

	evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
	evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;

	/* processing only MISCONFIGURED physical port event */
	if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
		return;

	async_sli = (struct be_async_event_sli *)compl;
	state = async_sli->event_data1 >>
		(phba->fw_config.phys_port * 8) & 0xff;
	le = async_sli->event_data2 >>
	     (phba->fw_config.phys_port * 8) & 0xff;

	old_state = phba->optic_state;
	phba->optic_state = state;

	if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
		/* fw is reporting a state we don't know, log and return */
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
			      phba->port_name, async_sli->event_data1);
		return;
	}

	if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
		/* log link effect for unqualified-4, uncertified-5 optics */
		if (state > 3)
			msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
				" Link is non-operational." :
				" Link is operational.";
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
			sev = KERN_INFO;
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
			sev = KERN_ERR;
	}

	if (old_state != phba->optic_state)
		__beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
			      phba->port_name,
			      beiscsi_port_misconf_event_msg[state],
			      !msg ? "" : msg);
}
void beiscsi_process_async_event(struct beiscsi_hba *phba,
				 struct be_mcc_compl *compl)
{
	char *sev = KERN_INFO;
	u8 evt_code;

	/* interpret flags as an async trailer */
	evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
	evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;

	switch (evt_code) {
	case ASYNC_EVENT_CODE_LINK_STATE:
		beiscsi_process_async_link(phba, compl);
		break;
	case ASYNC_EVENT_CODE_ISCSI:
		phba->state |= BE_ADAPTER_CHECK_BOOT;
		phba->get_boot = BE_GET_BOOT_RETRIES;
		break;
	case ASYNC_EVENT_CODE_SLI:
		beiscsi_process_async_sli(phba, compl);
		break;
	default:
		/* event not registered */
		sev = KERN_ERR;
		break;
	}

	beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
		    evt_code, compl->status, compl->flags);
}
int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
			      struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	struct be_dma_mem *tag_mem;
	unsigned int tag, wrb_idx;

	be_dws_le_to_cpu(compl, 4);
	tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
	wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;

	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX cmd completed but not posted\n");
		return 0;
	}

	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command from FW\n");
		/*
		 * Check for the size before freeing resource.
		 * Only for non-embedded cmd, PCI resource is allocated.
		 */
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
		if (tag_mem->size)
			pci_free_consistent(ctrl->pdev, tag_mem->size,
					    tag_mem->va, tag_mem->dma);
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* The ctrl.mcc_tag_status[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
	ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
	ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
				     CQE_STATUS_ADDL_MASK;
	ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);

	/* write ordering forced in wake_up_interruptible */
	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);
	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}
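/*
 * Illustrative sketch (not part of the driver): the word written into
 * ctrl->mcc_tag_status[tag] above packs four fields exactly as the layout
 * comment says -- bit 31 valid, bits 23:16 WRB index, bits 15:8 extended
 * status, bits 7:0 completion status. The waiter in beiscsi_mccq_compl_wait()
 * reverses this with the CQE_STATUS_* masks and shifts. A standalone model
 * using hypothetical EX_* constants that mirror that layout:
 */
#if 0	/* example only, never compiled into the driver */
#define EX_VALID_BIT	0x80000000
#define EX_WRB_SHIFT	16
#define EX_WRB_MASK	0x00FF0000
#define EX_EXTD_SHIFT	8
#define EX_EXTD_MASK	0x0000FF00
#define EX_COMPL_MASK	0x000000FF

static unsigned int ex_pack_status(unsigned int wrb_idx,
				   unsigned int extd_sts,
				   unsigned int comp_sts)
{
	unsigned int word = EX_VALID_BIT;

	word |= (wrb_idx << EX_WRB_SHIFT) & EX_WRB_MASK;
	word |= (extd_sts << EX_EXTD_SHIFT) & EX_EXTD_MASK;
	word |= comp_sts & EX_COMPL_MASK;
	return word;
}

static void ex_unpack_status(unsigned int word, unsigned int *wrb_idx,
			     unsigned int *extd_sts, unsigned int *comp_sts)
{
	*wrb_idx = (word & EX_WRB_MASK) >> EX_WRB_SHIFT;
	*extd_sts = (word & EX_EXTD_MASK) >> EX_EXTD_SHIFT;
	*comp_sts = word & EX_COMPL_MASK;
}
#endif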
/**
 * be_mcc_compl_poll()- Wait for MBX completion
 * @phba: driver private structure
 *
 * Wait till no more pending mcc requests are present
 **/
int be_mcc_compl_poll(struct beiscsi_hba *phba, unsigned int tag)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int i;

	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d: tag %u state not running\n", tag);
		return 0;
	}
	for (i = 0; i < mcc_timeout; i++) {
		if (beiscsi_error(phba))
			return -EIO;

		beiscsi_process_mcc_cq(phba);
		/* after polling, wrb and tag need to be released */
		if (!test_bit(MCC_TAG_STATE_RUNNING,
			      &ctrl->ptag_state[tag].tag_state)) {
			free_mcc_wrb(ctrl, tag);
			break;
		}
		udelay(100);
	}

	if (i < mcc_timeout)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : FW Timed Out\n");
	phba->fw_timeout = true;
	beiscsi_ue_detect(phba);

	return -EBUSY;
}
void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	/* make request available for DMA */
	wmb();
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
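/*
 * Illustrative sketch (not part of the driver): the doorbell word written by
 * be_mcc_notify() carries the MCC ring id in its low bits and the count of
 * newly posted WRBs at DB_MCCQ_NUM_POSTED_SHIFT. The EX_* values below are
 * hypothetical stand-ins for the real masks and shifts; posting one entry to
 * ring 5 composes like this:
 */
#if 0	/* example only, never compiled into the driver */
#define EX_RING_ID_MASK		0x07FF	/* hypothetical: low bits = ring id */
#define EX_NUM_POSTED_SHIFT	16	/* hypothetical posted-count position */

static unsigned int ex_mcc_doorbell(unsigned int ring_id,
				    unsigned int num_posted)
{
	unsigned int val = 0;

	val |= ring_id & EX_RING_ID_MASK;
	val |= num_posted << EX_NUM_POSTED_SHIFT;
	return val;	/* e.g. ring 5, 1 posted -> 0x00010005 */
}
#endif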
/**
 * be_mbox_db_ready_poll()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Check for the ready status of FW to send BMBX
 * commands to adapter.
 **/
static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
	/* wait 30s for generic non-flash MBOX operation */
#define BEISCSI_MBX_RDY_BIT_TIMEOUT	30000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	u32 ready;

	/*
	 * This BMBX busy wait path is used during init only.
	 * For the commands executed during init, 5s should suffice.
	 */
	timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
	do {
		if (beiscsi_error(phba))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -EIO;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			return 0;

		if (time_after(jiffies, timeout))
			break;
	} while (!ready);

	beiscsi_log(phba, KERN_ERR,
		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : FW Timed Out\n");
	phba->fw_timeout = true;
	beiscsi_ue_detect(phba);

	return -EBUSY;
}
/**
 * be_mbox_notify: Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Ring doorbell to inform adapter of a BMBX command
 * to process
 **/
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* RDY is set; small delay before CQE read. */
	udelay(1);
	status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
	return status;
}
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
				 MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, int cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;

		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}
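/*
 * Worked example for eq_delay_to_mult() (illustrative, values computed by
 * hand): a requested delay of 8 usec gives interrupt_rate = 1000000 / 8 =
 * 125000 int/s, so multiplier = (651042 - 125000) * 10 / 125000 = 42, and
 * rounding yields (42 + 5) / 10 = 4. A delay of 0 maps to 0 (no moderation),
 * while very large delays saturate at the 1023 ceiling.
 */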
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
/**
 * be_cmd_fw_initialize()- Initialize FW
 * @ctrl: Pointer to function control structure
 *
 * Send FW initialize pattern for the function.
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	u8 *endian_check;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check++ = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : be_cmd_fw_initialize Failed\n");

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
/**
 * be_cmd_fw_uninit()- Uninitialize FW
 * @ctrl: Pointer to function control structure
 *
 * Send FW uninitialize pattern for the function
 *
 * return
 * Success: 0
 * Failure: Non-Zero value
 **/
int be_cmd_fw_uninit(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	u8 *endian_check;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xAA;
	*endian_check++ = 0xBB;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xCC;
	*endian_check++ = 0xDD;
	*endian_check = 0xFF;

	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : be_cmd_fw_uninit Failed\n");

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_cq_context, coalescwm,
			      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
			      PCI_FUNC(ctrl->pdev->devfn));
	} else {
		req->hdr.version = MBX_CMD_VER2;
		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
			      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : In be_cmd_cq_create, status=0x%08x\n",
			    status);

	mutex_unlock(&ctrl->mbox_lock);

	return status;
}
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
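/*
 * Worked example for be_encoded_q_len() (illustrative): queue lengths are
 * powers of two, so fls() yields log2(len) + 1. A 128-entry MCC ring encodes
 * as fls(128) = 8, a 1024-entry ring as fls(1024) = 11, and a 32768-entry
 * ring gives fls(32768) = 16, which the function remaps to 0.
 */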
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			    struct be_queue_info *mccq,
			    struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create_ext *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	mutex_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&phba->ctrl.mbox_lock);

	return status;
}
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 subsys = 0, opcode = 0;
	int status;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BC_%d : In beiscsi_cmd_q_destroy "
		    "queue_type : %d\n", queue_type);

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		mutex_unlock(&ctrl->mbox_lock);
		return -ENXIO;
	}

	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
/**
 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
 * @ctrl: ptr to ctrl_info
 * @cq: Completion Queue
 * @dq: Default Queue
 * @length: ring size
 * @entry_size: size of each entry in DEFQ
 * @is_header: Header or Data DEFQ
 * @ulp_num: Bind to which ULP
 *
 * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
 * on this queue by the FW
 *
 * return
 * Success: 0
 * Failure: Non-Zero Value
 **/
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size, uint8_t is_header,
				    uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	void *ctxt = &req->context;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
			      sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      cq_id_recv, ctxt, cq->id);
	} else {
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
			      sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      cq_id_recv, ctxt, cq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_ring *defq_ring;
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
		if (is_header)
			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
		else
			defq_ring = &phba->phwi_ctrlr->
				    default_pdu_data[ulp_num];

		defq_ring->id = dq->id;

		if (!phba->fw_config.dual_ulp_aware) {
			defq_ring->ulp_num = BEISCSI_ULP0;
			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
		} else {
			defq_ring->ulp_num = resp->ulp_num;
			defq_ring->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);

	return status;
}
/**
 * be_cmd_wrbq_create()- Create WRBQ
 * @ctrl: ptr to ctrl_info
 * @q_mem: memory details for the queue
 * @wrbq: queue info
 * @pwrb_context: ptr to wrb_context
 * @ulp_num: ULP on which the WRBQ is to be created
 *
 * Create WRBQ on the passed ULP_NUM.
 **/
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
		       struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq,
		       struct hwi_wrb_context *pwrb_context,
		       uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;

		pwrb_context->cid = wrbq->id;
		if (!phba->fw_config.dual_ulp_aware) {
			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
			pwrb_context->ulp_num = BEISCSI_ULP0;
		} else {
			pwrb_context->ulp_num = resp->ulp_num;
			pwrb_context->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
				   struct be_dma_mem *q_mem)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_remove_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	unsigned int curr_pages;
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;

	if (num_pages == 0xff)
		num_pages = 1;

	mutex_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BC_%d : FW CMD to map iscsi frags failed.\n");
			goto error;
		}
	} while (num_pages > 0);
error:
	mutex_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
int beiscsi_cmd_reset_function(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	req = embedded_payload(wrb);
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
/**
 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
 * @phba: device priv structure instance
 * @vlan_tag: TAG to be set
 *
 * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
 *
 * returns
 *	TAG for the MBX Cmd
 **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
		    uint16_t vlan_tag)
{
	unsigned int tag;
	struct be_mcc_wrb *wrb;
	struct be_cmd_set_vlan_req *req;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	if (mutex_lock_interruptible(&ctrl->mbox_lock))
		return 0;
	wrb = alloc_mcc_wrb(phba, &tag);
	if (!wrb) {
		mutex_unlock(&ctrl->mbox_lock);
		return 0;
	}

	req = embedded_payload(wrb);
	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
			   sizeof(*req));

	req->interface_hndl = phba->interface_handle;
	req->vlan_priority = vlan_tag;

	be_mcc_notify(phba, tag);
	mutex_unlock(&ctrl->mbox_lock);

	return tag;
}