// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 */

#include <scsi/iscsi_proto.h>

#include "be_main.h"
#include "be.h"
#include "be_mgmt.h"
/* UE Status Low CSR */
static const char * const desc_ue_status_low[] = {
	/* ... */
};
/* UE Status High CSR */
static const char * const desc_ue_status_hi[] = {
	/* ... */
};
struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
				 unsigned int *ref_tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb = NULL;
	unsigned int tag;

	spin_lock(&phba->ctrl.mcc_lock);
	if (mccq->used == mccq->len) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
			    mccq->used, phba->ctrl.mcc_tag_available);
		goto alloc_failed;
	}

	if (!phba->ctrl.mcc_tag_available)
		goto alloc_failed;

	tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
	if (!tag) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
			    phba->ctrl.mcc_tag_available,
			    phba->ctrl.mcc_alloc_index);
		goto alloc_failed;
	}

	/* return this tag for further reference */
	*ref_tag = tag;
	phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
	phba->ctrl.mcc_tag_status[tag] = 0;
	phba->ctrl.ptag_state[tag].tag_state = 0;
	phba->ctrl.ptag_state[tag].cbfn = NULL;
	phba->ctrl.mcc_tag_available--;
	if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
		phba->ctrl.mcc_alloc_index = 0;
	else
		phba->ctrl.mcc_alloc_index++;

	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = tag;
	wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
	queue_head_inc(mccq);
	mccq->used++;

alloc_failed:
	spin_unlock(&phba->ctrl.mcc_lock);
	return wrb;
}
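
/*
 * Usage sketch (illustrative, not part of the driver): the non-embedded MCC
 * path pairs alloc_mcc_wrb()/be_mcc_notify() with beiscsi_mccq_compl_wait(),
 * mirroring what be_cmd_set_vlan() further below does:
 *
 *	unsigned int tag;
 *	struct be_mcc_wrb *wrb;
 *
 *	if (mutex_lock_interruptible(&phba->ctrl.mbox_lock))
 *		return 0;
 *	wrb = alloc_mcc_wrb(phba, &tag);	// reserve a WRB and a tag
 *	if (!wrb) {
 *		mutex_unlock(&phba->ctrl.mbox_lock);
 *		return 0;
 *	}
 *	// fill the embedded payload of wrb here
 *	be_mcc_notify(phba, tag);		// ring the MCCQ doorbell
 *	mutex_unlock(&phba->ctrl.mbox_lock);
 *	// later: wait for the CQE; this also frees the WRB/tag on success
 *	beiscsi_mccq_compl_wait(phba, tag, NULL, NULL);
 */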
void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
{
	struct be_queue_info *mccq = &ctrl->mcc_obj.q;

	spin_lock(&ctrl->mcc_lock);
	tag = tag & MCC_Q_CMD_TAG_MASK;
	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
		ctrl->mcc_free_index = 0;
	else
		ctrl->mcc_free_index++;
	ctrl->mcc_tag_available++;
	mccq->used--;
	spin_unlock(&ctrl->mcc_lock);
}
/**
 * __beiscsi_mcc_compl_status()- Return the status of MCC completion
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 */
int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
			       unsigned int tag,
			       struct be_mcc_wrb **wrb,
			       struct be_dma_mem *mbx_cmd_mem)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_mcc_wrb *temp_wrb;
	uint32_t mcc_tag_status;
	int rc = 0;

	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
	status = (mcc_tag_status & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
			CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
			  CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem, mbx_hdr->opcode,
			    status, addl_status);
		rc = -EIO;
		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);
			rc = -EAGAIN;
		}
	}

	return rc;
}
/**
 * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Waits for MBX completion with the passed TAG.
 */
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
			    unsigned int tag,
			    struct be_mcc_wrb **wrb,
			    struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;

	if (!tag || tag > MAX_MCC_CMD) {
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : invalid tag %u\n", tag);
		return -EINVAL;
	}

	if (beiscsi_hba_in_error(phba)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/* wait for the mccq completion */
	rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
					      phba->ctrl.mcc_tag_status[tag],
					      msecs_to_jiffies(
						BEISCSI_HOST_MBX_TIMEOUT));
	/*
	 * Return EIO if port is being disabled. Associated DMA memory, if any,
	 * is freed by the caller. When port goes offline, MCCQ is cleaned up
	 * so does WRB.
	 */
	if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/*
	 * If MBOX cmd timeout expired, tag and resource allocated
	 * for cmd is not freed until FW returns completion.
	 */
	if (rc <= 0) {
		struct be_dma_mem *tag_mem;

		/*
		 * PCI/DMA memory allocated and posted in non-embedded mode
		 * will have mbx_cmd_mem != NULL.
		 * Save virtual and bus addresses for the command so that it
		 * can be freed later.
		 */
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else
			tag_mem->size = 0;

		/* first make tag_mem_state visible to all */
		wmb();
		set_bit(MCC_TAG_STATE_TIMEOUT,
			&phba->ctrl.ptag_state[tag].tag_state);

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		return -EBUSY;
	}

	rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);

	free_mcc_wrb(&phba->ctrl, tag);
	return rc;
}
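
/*
 * Note on the timeout path above: when the wait expires, the tag stays
 * RUNNING, its DMA details (if any) are parked in
 * ptag_state[tag].tag_mem_state and MCC_TAG_STATE_TIMEOUT is set. The late
 * FW completion is then handled by beiscsi_process_mcc_compl(), which frees
 * that DMA memory and the WRB, so a caller that got -EBUSY is expected not
 * to free mbx_cmd_mem itself.
 */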
/**
 * beiscsi_process_mbox_compl()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when BMBX method used
 */
static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
				      struct be_mcc_compl *compl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	u16 compl_status, extd_status;

	/*
	 * To check if valid bit is set, check the entire word as we don't know
	 * the endianness of the data (old entry is host endian while a new
	 * entry is little endian)
	 */
	if (!compl->flags) {
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : BMBX busy, no completion\n");
		return -EBUSY;
	}
	compl->flags = le32_to_cpu(compl->flags);
	WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);

	/*
	 * Just swap the status to host endian;
	 * mcc tag is opaquely copied from mcc_wrb.
	 */
	be_dws_le_to_cpu(compl, 4);
	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* Need to reset the entire word that houses the valid bit */
	compl->flags = 0;

	if (compl_status == MCC_STATUS_SUCCESS)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
		    hdr->subsystem, hdr->opcode, compl_status, extd_status);
	return compl_status;
}
static void beiscsi_process_async_link(struct beiscsi_hba *phba,
				       struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt;

	evt = (struct be_async_event_link_state *)compl;

	phba->port_speed = evt->port_speed;
	/*
	 * Check logical link status in ASYNC event.
	 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
	 */
	if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
		set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Up on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
	} else {
		clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Down on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
		iscsi_host_for_each_session(phba->shost,
					    beiscsi_session_fail);
	}
}
static char *beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};
static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
				      struct be_mcc_compl *compl)
{
	struct be_async_event_sli *async_sli;
	u8 evt_type, state, old_state, le;
	char *sev = KERN_WARNING;
	char *msg = NULL;

	evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
	evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;

	/* processing only MISCONFIGURED physical port event */
	if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
		return;

	async_sli = (struct be_async_event_sli *)compl;
	state = async_sli->event_data1 >>
		(phba->fw_config.phys_port * 8) & 0xff;
	le = async_sli->event_data2 >>
	     (phba->fw_config.phys_port * 8) & 0xff;

	old_state = phba->optic_state;
	phba->optic_state = state;

	if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
		/* fw is reporting a state we don't know, log and return */
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
			      phba->port_name, async_sli->event_data1);
		return;
	}

	if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
		/* log link effect for unqualified-4, uncertified-5 optics */
		if (state > 3)
			msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
				" Link is non-operational." :
				" Link is operational.";
		/* 1 - info */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
			sev = KERN_INFO;
		/* 2 - error */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
			sev = KERN_ERR;
	}

	if (old_state != phba->optic_state)
		__beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
			      phba->port_name,
			      beiscsi_port_misconf_event_msg[state],
			      !msg ? "" : msg);
}
void beiscsi_process_async_event(struct beiscsi_hba *phba,
				 struct be_mcc_compl *compl)
{
	char *sev = KERN_INFO;
	u8 evt_code;

	/* interpret flags as an async trailer */
	evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
	evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;

	switch (evt_code) {
	case ASYNC_EVENT_CODE_LINK_STATE:
		beiscsi_process_async_link(phba, compl);
		break;
	case ASYNC_EVENT_CODE_ISCSI:
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		sev = KERN_ERR;
		break;
	case ASYNC_EVENT_CODE_SLI:
		beiscsi_process_async_sli(phba, compl);
		break;
	default:
		/* event not registered */
		sev = KERN_ERR;
	}

	beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
		    evt_code, compl->status, compl->flags);
}
int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
			      struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	struct be_dma_mem *tag_mem;
	unsigned int tag, wrb_idx;

	be_dws_le_to_cpu(compl, 4);
	tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
	wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;

	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX cmd completed but not posted\n");
		return 0;
	}

	/* end MCC with this tag */
	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);

	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command from FW\n");
		/*
		 * Check for the size before freeing resource.
		 * Only for non-embedded cmd, PCI resource is allocated.
		 */
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
		if (tag_mem->size) {
			dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
					  tag_mem->va, tag_mem->dma);
			tag_mem->size = 0;
		}
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* The ctrl.mcc_tag_status[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
	ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
	ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
				     CQE_STATUS_ADDL_MASK;
	ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);

	if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
		if (ctrl->ptag_state[tag].cbfn)
			ctrl->ptag_state[tag].cbfn(phba, tag);
		else
			__beiscsi_log(phba, KERN_ERR,
				      "BC_%d : MBX ASYNC command with no callback\n");
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
		/* just check completion status and free wrb */
		__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}
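
/*
 * Decoding sketch (illustrative only): a waiter woken on mcc_wait[tag] can
 * unpack the word written above the same way __beiscsi_mcc_compl_status()
 * does:
 *
 *	u32 ts = ctrl->mcc_tag_status[tag];
 *	u16 status = ts & CQE_STATUS_MASK;
 *	u16 addl = (ts & CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
 *	u16 wrb_idx = (ts & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;
 */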
void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	/* make request available for DMA */
	wmb();
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}
/**
 * be_mbox_db_ready_poll()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Check for the ready status of FW to send BMBX
 * commands to adapter.
 */
static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
	/* wait 30s for generic non-flash MBOX operation */
#define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000

	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	u32 ready;

	/*
	 * This BMBX busy wait path is used during init only.
	 * For the commands executed during init, 5s should suffice.
	 */
	timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
	do {
		if (beiscsi_hba_in_error(phba))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -EIO;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			return 0;

		if (time_after(jiffies, timeout))
			break;
		/* 1ms sleep is enough in most cases */
		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
	} while (!ready);

	beiscsi_log(phba, KERN_ERR,
		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : FW Timed Out\n");
	set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
	return -EBUSY;
}
/**
 * be_mbox_notify: Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Ring doorbell to inform adapter of a BMBX command
 */
static int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* RDY is set; small delay before CQE read. */
	udelay(1);
	status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
	return status;
}
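
/*
 * Note on the sequence above: the BMBX physical address is delivered in two
 * doorbell writes, first the high part with MPU_MAILBOX_DB_HI_MASK set, then
 * the low part with the HI bit clear, each write followed by another
 * be_mbox_db_ready_poll() before the completion entry is read.
 */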
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
					   MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, u32 cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	u32 i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
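
/*
 * Example (illustrative): for a 16 KiB ring that starts on a 4 KiB boundary,
 * PAGES_4K_SPANNED() yields 4, so pages[0..3] are filled with the DMA address
 * split into lo/hi dwords, each entry one 4 KiB page further along.
 */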
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;

		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}
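
/*
 * Worked example (assuming the reconstructed control flow above): for
 * usec_delay = 120, interrupt_rate = 1000000 / 120 = 8333, so
 * (651042 - 8333) * 10 / 8333 = 771, which the rounding step
 * (multiplier + round / 2) / round turns into 77, well under the 1023 cap.
 */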
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
		      PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
		      eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *cq, struct be_queue_info *eq,
			  bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt = &req->context;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_cq_context, coalescwm,
			      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
			      PCI_FUNC(ctrl->pdev->devfn));
	} else {
		req->hdr.version = MBX_CMD_VER2;
		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
			      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	} else
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : In be_cmd_cq_create, status=0x%08x\n",
			    status);
	mutex_unlock(&ctrl->mbox_lock);

	return status;
}
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
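
/*
 * Example: fls() returns log2(len) + 1 for power-of-two ring sizes, so a
 * 256-entry queue encodes to 9 and a 1024-entry queue to 11; an encoding of
 * 16 (a 32768-entry ring) is remapped to 0 above.
 */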
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
			    struct be_queue_info *mccq,
			    struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create_ext *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	struct be_ctrl_info *ctrl;
	void *ctxt;
	int status;

	mutex_lock(&phba->ctrl.mbox_lock);
	ctrl = &phba->ctrl;
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;

	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
		      PCI_FUNC(phba->pcidev->devfn));
	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&phba->ctrl.mbox_lock);

	return status;
}
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
			  int queue_type)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 subsys = 0, opcode = 0;
	int status;

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BC_%d : In beiscsi_cmd_q_destroy "
		    "queue_type : %d\n", queue_type);

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	case QTYPE_WRBQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
		break;
	case QTYPE_DPDUQ:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
		break;
	case QTYPE_SGL:
		subsys = CMD_SUBSYSTEM_ISCSI;
		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
		break;
	default:
		mutex_unlock(&ctrl->mbox_lock);
		BUG();
	}
	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
	if (queue_type != QTYPE_SGL)
		req->id = cpu_to_le16(q->id);

	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
/**
 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
 * @ctrl: ptr to ctrl_info
 * @cq: Completion Queue
 * @dq: DEFQ to be created
 * @length: ring size
 * @entry_size: size of each entry in DEFQ
 * @is_header: Header or Data DEFQ
 * @ulp_num: Bind to which ULP
 *
 * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
 * on this queue by the FW
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 */
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size, uint8_t is_header,
				    uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	void *ctxt = &req->context;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
					       sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_be_default_pdu_context,
			      cq_id_recv, ctxt, cq->id);
	} else {
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid, ctxt, 0);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      rx_pdid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      ring_size, ctxt,
			      be_encoded_q_len(length /
					       sizeof(struct phys_addr)));
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      default_buffer_size, ctxt, entry_size);
		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
			      cq_id_recv, ctxt, cq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_ring *defq_ring;
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
		if (is_header)
			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
		else
			defq_ring = &phba->phwi_ctrlr->
				    default_pdu_data[ulp_num];

		defq_ring->id = dq->id;

		if (!phba->fw_config.dual_ulp_aware) {
			defq_ring->ulp_num = BEISCSI_ULP0;
			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
		} else {
			defq_ring->ulp_num = resp->ulp_num;
			defq_ring->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);

	return status;
}
/**
 * be_cmd_wrbq_create()- Create WRBQ
 * @ctrl: ptr to ctrl_info
 * @q_mem: memory details for the queue
 * @wrbq: queue info
 * @pwrb_context: ptr to wrb_context
 * @ulp_num: ULP on which the WRBQ is to be created
 *
 * Create WRBQ on the passed ULP_NUM.
 */
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
		       struct be_dma_mem *q_mem,
		       struct be_queue_info *wrbq,
		       struct hwi_wrb_context *pwrb_context,
		       uint8_t ulp_num)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_wrbq_create_req *req = embedded_payload(wrb);
	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

	if (phba->fw_config.dual_ulp_aware) {
		req->ulp_num = ulp_num;
		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
	}

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		wrbq->id = le16_to_cpu(resp->cid);
		wrbq->created = true;

		pwrb_context->cid = wrbq->id;
		if (!phba->fw_config.dual_ulp_aware) {
			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
			pwrb_context->ulp_num = BEISCSI_ULP0;
		} else {
			pwrb_context->ulp_num = resp->ulp_num;
			pwrb_context->doorbell_offset = resp->doorbell_offset;
		}
	}
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
				   struct be_dma_mem *q_mem)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_remove_template_pages_req *req = embedded_payload(wrb);
	int status;

	mutex_lock(&ctrl->mbox_lock);

	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
			   sizeof(*req));

	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;

	status = be_mbox_notify(ctrl);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	int status;
	unsigned int curr_pages;
	u32 temp_num_pages = num_pages;

	if (num_pages == 0xff)
		num_pages = 1;

	mutex_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BC_%d : FW CMD to map iscsi frags failed.\n");
			goto error;
		}
	} while (num_pages > 0);
error:
	mutex_unlock(&ctrl->mbox_lock);
	if (status)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}
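
/*
 * Chunking note (derived from the loop above): BE_NUMBER_OF_FIELD() gives the
 * number of phys_addr slots in one request, so a large SGL region is posted
 * across several mailbox commands, advancing q_mem->dma and page_offset by
 * req->num_pages on each pass until num_pages reaches zero.
 */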
/**
 * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
 * @phba: device priv structure instance
 * @vlan_tag: TAG to be set
 *
 * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
 *
 * returns
 *	TAG for the MBX Cmd
 **/
int be_cmd_set_vlan(struct beiscsi_hba *phba,
		    uint16_t vlan_tag)
{
	unsigned int tag;
	struct be_mcc_wrb *wrb;
	struct be_cmd_set_vlan_req *req;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	if (mutex_lock_interruptible(&ctrl->mbox_lock))
		return 0;
	wrb = alloc_mcc_wrb(phba, &tag);
	if (!wrb) {
		mutex_unlock(&ctrl->mbox_lock);
		return 0;
	}

	req = embedded_payload(wrb);
	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
			   sizeof(*req));

	req->interface_hndl = phba->interface_handle;
	req->vlan_priority = vlan_tag;

	be_mcc_notify(phba, tag);
	mutex_unlock(&ctrl->mbox_lock);

	return tag;
}
int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
			       struct beiscsi_hba *phba)
{
	struct be_dma_mem nonemb_cmd;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_mgmt_controller_attributes *req;
	struct be_sge *sge = nonembedded_sgl(wrb);
	int status = 0;

	nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
				sizeof(struct be_mgmt_controller_attributes),
				&nonemb_cmd.dma, GFP_KERNEL);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : dma_alloc_coherent failed in %s\n",
			    __func__);
		return -ENOMEM;
	}
	nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
	req = nonemb_cmd.va;
	memset(req, 0, sizeof(*req));
	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(nonemb_cmd.size);
	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;

		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : Firmware Version of CMD : %s\n"
			    "Firmware Version is : %s\n"
			    "Developer Build, not performing version check...\n",
			    resp->params.hba_attribs.flashrom_version_string,
			    resp->params.hba_attribs.firmware_version_string);

		phba->fw_config.iscsi_features =
				resp->params.hba_attribs.iscsi_features;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BM_%d : phba->fw_config.iscsi_features = %d\n",
			    phba->fw_config.iscsi_features);
		memcpy(phba->fw_ver_str,
		       resp->params.hba_attribs.firmware_version_string,
		       BEISCSI_VER_STRLEN);
	} else
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : Failed in beiscsi_check_supported_fw\n");
	mutex_unlock(&ctrl->mbox_lock);
	dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
			  nonemb_cmd.va, nonemb_cmd.dma);

	return status;
}
/**
 * beiscsi_get_fw_config()- Get the FW config for the function
 * @ctrl: ptr to Ctrl Info
 * @phba: ptr to the dev priv structure
 *
 * Get the FW config and resources available for the function.
 * The resources are created based on the count received here.
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 */
int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
			  struct beiscsi_hba *phba)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
	uint32_t cid_count, icd_count;
	int status = -EINVAL;
	uint8_t ulp_num = 0;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));
	be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);

	be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);

	if (be_mbox_notify(ctrl)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : Failed in beiscsi_get_fw_config\n");
		goto fail_init;
	}

	/* FW response formats depend on port id */
	phba->fw_config.phys_port = pfw_cfg->phys_port;
	if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : invalid physical port id %d\n",
			    phba->fw_config.phys_port);
		goto fail_init;
	}

	/* populate and check FW config against min and max values */
	if (!is_chip_be2_be3r(phba)) {
		phba->fw_config.eqid_count = pfw_cfg->eqid_count;
		phba->fw_config.cqid_count = pfw_cfg->cqid_count;
		if (phba->fw_config.eqid_count == 0 ||
		    phba->fw_config.eqid_count > 2048) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid EQ count %d\n",
				    phba->fw_config.eqid_count);
			goto fail_init;
		}
		if (phba->fw_config.cqid_count == 0 ||
		    phba->fw_config.cqid_count > 4096) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BG_%d : invalid CQ count %d\n",
				    phba->fw_config.cqid_count);
			goto fail_init;
		}
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
			    phba->fw_config.eqid_count,
			    phba->fw_config.cqid_count);
	}

	/*
	 * Check on which all ULP iSCSI Protocol is loaded.
	 * Set the Bit for those ULP. This set flag is used
	 * at all places in the code to check on which ULP
	 * iSCSI Protocol is loaded
	 */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		if (pfw_cfg->ulp[ulp_num].ulp_mode &
		    BEISCSI_ULP_ISCSI_INI_MODE) {
			set_bit(ulp_num, &phba->fw_config.ulp_supported);

			/* Get the CID, ICD and Chain count for each ULP */
			phba->fw_config.iscsi_cid_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_base;
			phba->fw_config.iscsi_cid_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].sq_count;

			phba->fw_config.iscsi_icd_start[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_base;
			phba->fw_config.iscsi_icd_count[ulp_num] =
				pfw_cfg->ulp[ulp_num].icd_count;

			phba->fw_config.iscsi_chain_start[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_base;
			phba->fw_config.iscsi_chain_count[ulp_num] =
				pfw_cfg->chain_icd[ulp_num].chain_count;

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
				    "BG_%d : Function loaded on ULP : %d\n"
				    "\tiscsi_cid_count : %d\n"
				    "\tiscsi_cid_start : %d\n"
				    "\t iscsi_icd_count : %d\n"
				    "\t iscsi_icd_start : %d\n",
				    ulp_num,
				    phba->fw_config.iscsi_cid_count[ulp_num],
				    phba->fw_config.iscsi_cid_start[ulp_num],
				    phba->fw_config.iscsi_icd_count[ulp_num],
				    phba->fw_config.iscsi_icd_start[ulp_num]);
		}
	}

	if (phba->fw_config.ulp_supported == 0) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
			    pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
			    pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
		goto fail_init;
	}

	/*
	 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
	 */
	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
			break;
	icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	if (icd_count == 0 || icd_count > 65536) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid ICD count %d\n", icd_count);
		goto fail_init;
	}

	cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
		    BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
	if (cid_count == 0 || cid_count > 4096) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BG_%d: invalid CID count %d\n", cid_count);
		goto fail_init;
	}

	/*
	 * Check FW is dual ULP aware i.e. can handle either
	 * of the protocols.
	 */
	phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
					  BEISCSI_FUNC_DUA_MODE);

	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
		    "BG_%d : DUA Mode : 0x%x\n",
		    phba->fw_config.dual_ulp_aware);

	/* all set, continue using this FW config */
	status = 0;
fail_init:
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
/**
 * beiscsi_get_port_name()- Get port name for the function
 * @ctrl: ptr to Ctrl Info
 * @phba: ptr to the dev priv structure
 *
 * Get the alphanumeric character for port
 *
 **/
int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
{
	int ret = 0;
	struct be_mcc_wrb *wrb;
	struct be_cmd_get_port_name *ioctl;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_GET_PORT_NAME,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ret = be_mbox_notify(ctrl);
	phba->port_name = 0;
	if (!ret) {
		phba->port_name = ioctl->p.resp.port_names >>
				  (phba->fw_config.phys_port * 8) & 0xff;
	} else {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
			    ret, ioctl->h.resp_hdr.status);
	}

	if (phba->port_name == 0)
		phba->port_name = '?';

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}
int beiscsi_set_host_data(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_cmd_set_host_data *ioctl;
	struct be_mcc_wrb *wrb;
	int ret = 0;

	if (is_chip_be2_be3r(phba))
		return ret;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_SET_HOST_DATA,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID;
	ioctl->param.req.param_len =
		snprintf((char *)ioctl->param.req.param_data,
			 sizeof(ioctl->param.req.param_data),
			 "Linux iSCSI v%s", BUILD_STR);
	ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4);
	if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
		ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
	ret = be_mbox_notify(ctrl);
	if (!ret) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : HBA set host driver version\n");
	} else {
		/*
		 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
		 * Older FW versions return this error.
		 */
		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
		    ret == MCC_STATUS_INVALID_LENGTH)
			__beiscsi_log(phba, KERN_INFO,
				      "BG_%d : HBA failed to set host driver version\n");
	}

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}
int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_cmd_set_features *ioctl;
	struct be_mcc_wrb *wrb;
	int ret = 0;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);
	memset(wrb, 0, sizeof(*wrb));
	ioctl = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_SET_FEATURES,
			   EMBED_MBX_MAX_PAYLOAD_SIZE);
	ioctl->feature = BE_CMD_SET_FEATURE_UER;
	ioctl->param_len = sizeof(ioctl->param.req);
	ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
	ret = be_mbox_notify(ctrl);
	if (!ret) {
		phba->ue2rp = ioctl->param.resp.ue2rp;
		set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BG_%d : HBA error recovery supported\n");
	} else {
		/*
		 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
		 * Older FW versions return this error.
		 */
		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
		    ret == MCC_STATUS_INVALID_LENGTH)
			__beiscsi_log(phba, KERN_INFO,
				      "BG_%d : HBA error recovery not supported\n");
	}

	mutex_unlock(&ctrl->mbox_lock);
	return ret;
}
static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
{
	u32 sem;

	if (is_chip_be2_be3r(phba))
		sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(phba->pcidev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
	return sem;
}
int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
{
	u32 loop, post, rdy = 0;

	loop = 1000;
	while (loop--) {
		post = beiscsi_get_post_stage(phba);
		if (post & POST_ERROR_BIT)
			break;
		if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
			rdy = 1;
			break;
		}
		msleep(60);
	}

	if (!rdy) {
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : FW not ready 0x%x\n", post);
	}

	return rdy;
}
int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req;
	int status;

	mutex_lock(&ctrl->mbox_lock);

	req = embedded_payload(wrb);
	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
	status = be_mbox_notify(ctrl);

	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u8 *endian_check;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	endian_check = (u8 *) wrb;
	if (load) {
		/* to start communicating */
		*endian_check++ = 0xFF;
		*endian_check++ = 0x12;
		*endian_check++ = 0x34;
		*endian_check++ = 0xFF;
		*endian_check++ = 0xFF;
		*endian_check++ = 0x56;
		*endian_check++ = 0x78;
		*endian_check++ = 0xFF;
	} else {
		/* to stop communicating */
		*endian_check++ = 0xFF;
		*endian_check++ = 0xAA;
		*endian_check++ = 0xBB;
		*endian_check++ = 0xFF;
		*endian_check++ = 0xFF;
		*endian_check++ = 0xCC;
		*endian_check++ = 0xDD;
		*endian_check = 0xFF;
	}
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
			    "BC_%d : special WRB message failed\n");
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
int beiscsi_init_sliport(struct beiscsi_hba *phba)
{
	int status;

	/* check POST stage before talking to FW */
	status = beiscsi_check_fw_rdy(phba);
	if (!status)
		return -EIO;

	/* clear all error states after checking FW rdy */
	phba->state &= ~BEISCSI_HBA_IN_ERR;

	/* check again UER support */
	phba->state &= ~BEISCSI_HBA_UER_SUPP;

	/*
	 * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
	 * It should clean up any stale info in FW for this fn.
	 */
	status = beiscsi_cmd_function_reset(phba);
	if (status) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
			    "BC_%d : SLI Function Reset failed\n");
		return status;
	}

	/* indicate driver is loading */
	return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
}
/**
 * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
 * @phba: pointer to dev priv structure
 * @ulp: ULP number
 *
 * return
 *	Success: 0
 *	Failure: Non-Zero Value
 */
int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct iscsi_cleanup_req_v1 *req_v1;
	struct iscsi_cleanup_req *req;
	u16 hdr_ring_id, data_ring_id;
	struct be_mcc_wrb *wrb;
	int status;

	mutex_lock(&ctrl->mbox_lock);
	wrb = wrb_from_mbox(&ctrl->mbox_mem);

	hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
	data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
	if (is_chip_be2_be3r(phba)) {
		req = embedded_payload(wrb);
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
		req->chute = (1 << ulp);
		/* BE2/BE3 FW creates 8-bit ring id */
		req->hdr_ring_id = hdr_ring_id;
		req->data_ring_id = data_ring_id;
	} else {
		req_v1 = embedded_payload(wrb);
		be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
		be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CLEANUP,
				   sizeof(*req_v1));
		req_v1->hdr.version = 1;
		req_v1->chute = (1 << ulp);
		req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
		req_v1->data_ring_id = cpu_to_le16(data_ring_id);
	}

	status = be_mbox_notify(ctrl);
	if (status)
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
			    "BG_%d : %s failed %d\n", __func__, ulp);
	mutex_unlock(&ctrl->mbox_lock);
	return status;
}
/**
 * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
 * @phba: Driver priv structure
 *
 * Read registers linked to UE and check for the UE status
 **/
int beiscsi_detect_ue(struct beiscsi_hba *phba)
{
	uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
	uint32_t ue_hi = 0, ue_lo = 0;
	uint8_t i = 0;
	int ret = 0;

	pci_read_config_dword(phba->pcidev,
			      PCICFG_UE_STATUS_LOW, &ue_lo);
	pci_read_config_dword(phba->pcidev,
			      PCICFG_UE_STATUS_MASK_LOW,
			      &ue_mask_lo);
	pci_read_config_dword(phba->pcidev,
			      PCICFG_UE_STATUS_HIGH,
			      &ue_hi);
	pci_read_config_dword(phba->pcidev,
			      PCICFG_UE_STATUS_MASK_HI,
			      &ue_mask_hi);

	ue_lo = (ue_lo & ~ue_mask_lo);
	ue_hi = (ue_hi & ~ue_mask_hi);

	if (ue_lo || ue_hi) {
		set_bit(BEISCSI_HBA_IN_UE, &phba->state);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : HBA error detected\n");
		ret = 1;
	}

	if (ue_lo) {
		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
			if (ue_lo & 1)
				__beiscsi_log(phba, KERN_ERR,
					      "BC_%d : UE_LOW %s bit set\n",
					      desc_ue_status_low[i]);
		}
	}

	if (ue_hi) {
		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
			if (ue_hi & 1)
				__beiscsi_log(phba, KERN_ERR,
					      "BC_%d : UE_HIGH %s bit set\n",
					      desc_ue_status_hi[i]);
		}
	}
	return ret;
}
/**
 * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
 * @phba: Driver priv structure
 *
 * Read SLIPORT SEMAPHORE register to check for UER
 *
 **/
int beiscsi_detect_tpe(struct beiscsi_hba *phba)
{
	u32 post, status;
	int ret = 0;

	post = beiscsi_get_post_stage(phba);
	status = post & POST_STAGE_MASK;
	if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
	    POST_STAGE_RECOVERABLE_ERR) {
		set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
		__beiscsi_log(phba, KERN_INFO,
			      "BC_%d : HBA error recoverable: 0x%x\n", post);
		ret = 1;
	} else {
		__beiscsi_log(phba, KERN_INFO,
			      "BC_%d : HBA in UE: 0x%x\n", post);
	}

	return ret;
}