2 * Copyright (C) 2005 - 2016 Broadcom
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@broadcom.com
15 * Costa Mesa, CA 92626
18 #include <scsi/iscsi_proto.h>
24 /* UE Status Low CSR */
25 static const char * const desc_ue_status_low
[] = {
60 /* UE Status High CSR */
61 static const char * const desc_ue_status_hi
[] = {
96 struct be_mcc_wrb
*alloc_mcc_wrb(struct beiscsi_hba
*phba
,
97 unsigned int *ref_tag
)
99 struct be_queue_info
*mccq
= &phba
->ctrl
.mcc_obj
.q
;
100 struct be_mcc_wrb
*wrb
= NULL
;
103 spin_lock(&phba
->ctrl
.mcc_lock
);
104 if (mccq
->used
== mccq
->len
) {
105 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
|
106 BEISCSI_LOG_CONFIG
| BEISCSI_LOG_MBOX
,
107 "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
108 mccq
->used
, phba
->ctrl
.mcc_tag_available
);
112 if (!phba
->ctrl
.mcc_tag_available
)
115 tag
= phba
->ctrl
.mcc_tag
[phba
->ctrl
.mcc_alloc_index
];
117 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
|
118 BEISCSI_LOG_CONFIG
| BEISCSI_LOG_MBOX
,
119 "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
120 phba
->ctrl
.mcc_tag_available
,
121 phba
->ctrl
.mcc_alloc_index
);
125 /* return this tag for further reference */
127 phba
->ctrl
.mcc_tag
[phba
->ctrl
.mcc_alloc_index
] = 0;
128 phba
->ctrl
.mcc_tag_status
[tag
] = 0;
129 phba
->ctrl
.ptag_state
[tag
].tag_state
= 0;
130 phba
->ctrl
.ptag_state
[tag
].cbfn
= NULL
;
131 phba
->ctrl
.mcc_tag_available
--;
132 if (phba
->ctrl
.mcc_alloc_index
== (MAX_MCC_CMD
- 1))
133 phba
->ctrl
.mcc_alloc_index
= 0;
135 phba
->ctrl
.mcc_alloc_index
++;
137 wrb
= queue_head_node(mccq
);
138 memset(wrb
, 0, sizeof(*wrb
));
140 wrb
->tag0
|= (mccq
->head
<< MCC_Q_WRB_IDX_SHIFT
) & MCC_Q_WRB_IDX_MASK
;
141 queue_head_inc(mccq
);
145 spin_unlock(&phba
->ctrl
.mcc_lock
);
149 void free_mcc_wrb(struct be_ctrl_info
*ctrl
, unsigned int tag
)
151 struct be_queue_info
*mccq
= &ctrl
->mcc_obj
.q
;
153 spin_lock(&ctrl
->mcc_lock
);
154 tag
= tag
& MCC_Q_CMD_TAG_MASK
;
155 ctrl
->mcc_tag
[ctrl
->mcc_free_index
] = tag
;
156 if (ctrl
->mcc_free_index
== (MAX_MCC_CMD
- 1))
157 ctrl
->mcc_free_index
= 0;
159 ctrl
->mcc_free_index
++;
160 ctrl
->mcc_tag_available
++;
162 spin_unlock(&ctrl
->mcc_lock
);
166 * beiscsi_mcc_compl_status - Return the status of MCC completion
167 * @phba: Driver private structure
168 * @tag: Tag for the MBX Command
169 * @wrb: the WRB used for the MBX Command
170 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
176 int __beiscsi_mcc_compl_status(struct beiscsi_hba
*phba
,
178 struct be_mcc_wrb
**wrb
,
179 struct be_dma_mem
*mbx_cmd_mem
)
181 struct be_queue_info
*mccq
= &phba
->ctrl
.mcc_obj
.q
;
182 uint16_t status
= 0, addl_status
= 0, wrb_num
= 0;
183 struct be_cmd_resp_hdr
*mbx_resp_hdr
;
184 struct be_cmd_req_hdr
*mbx_hdr
;
185 struct be_mcc_wrb
*temp_wrb
;
186 uint32_t mcc_tag_status
;
189 mcc_tag_status
= phba
->ctrl
.mcc_tag_status
[tag
];
190 status
= (mcc_tag_status
& CQE_STATUS_MASK
);
191 addl_status
= ((mcc_tag_status
& CQE_STATUS_ADDL_MASK
) >>
192 CQE_STATUS_ADDL_SHIFT
);
195 mbx_hdr
= (struct be_cmd_req_hdr
*)mbx_cmd_mem
->va
;
197 wrb_num
= (mcc_tag_status
& CQE_STATUS_WRB_MASK
) >>
198 CQE_STATUS_WRB_SHIFT
;
199 temp_wrb
= (struct be_mcc_wrb
*)queue_get_wrb(mccq
, wrb_num
);
200 mbx_hdr
= embedded_payload(temp_wrb
);
206 if (status
|| addl_status
) {
207 beiscsi_log(phba
, KERN_WARNING
,
208 BEISCSI_LOG_INIT
| BEISCSI_LOG_EH
|
210 "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
211 mbx_hdr
->subsystem
, mbx_hdr
->opcode
,
212 status
, addl_status
);
214 if (status
== MCC_STATUS_INSUFFICIENT_BUFFER
) {
215 mbx_resp_hdr
= (struct be_cmd_resp_hdr
*)mbx_hdr
;
216 beiscsi_log(phba
, KERN_WARNING
,
217 BEISCSI_LOG_INIT
| BEISCSI_LOG_EH
|
219 "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
220 mbx_resp_hdr
->response_length
,
221 mbx_resp_hdr
->actual_resp_len
);
230 * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
231 * @phba: Driver private structure
232 * @tag: Tag for the MBX Command
233 * @wrb: the WRB used for the MBX Command
234 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
236 * Waits for MBX completion with the passed TAG.
242 int beiscsi_mccq_compl_wait(struct beiscsi_hba
*phba
,
244 struct be_mcc_wrb
**wrb
,
245 struct be_dma_mem
*mbx_cmd_mem
)
249 if (beiscsi_hba_in_error(phba
)) {
250 clear_bit(MCC_TAG_STATE_RUNNING
,
251 &phba
->ctrl
.ptag_state
[tag
].tag_state
);
255 /* wait for the mccq completion */
256 rc
= wait_event_interruptible_timeout(phba
->ctrl
.mcc_wait
[tag
],
257 phba
->ctrl
.mcc_tag_status
[tag
],
259 BEISCSI_HOST_MBX_TIMEOUT
));
261 * Return EIO if port is being disabled. Associated DMA memory, if any,
262 * is freed by the caller. When port goes offline, MCCQ is cleaned up
265 if (!test_bit(BEISCSI_HBA_ONLINE
, &phba
->state
)) {
266 clear_bit(MCC_TAG_STATE_RUNNING
,
267 &phba
->ctrl
.ptag_state
[tag
].tag_state
);
272 * If MBOX cmd timeout expired, tag and resource allocated
273 * for cmd is not freed until FW returns completion.
276 struct be_dma_mem
*tag_mem
;
279 * PCI/DMA memory allocated and posted in non-embedded mode
280 * will have mbx_cmd_mem != NULL.
281 * Save virtual and bus addresses for the command so that it
282 * can be freed later.
284 tag_mem
= &phba
->ctrl
.ptag_state
[tag
].tag_mem_state
;
286 tag_mem
->size
= mbx_cmd_mem
->size
;
287 tag_mem
->va
= mbx_cmd_mem
->va
;
288 tag_mem
->dma
= mbx_cmd_mem
->dma
;
292 /* first make tag_mem_state visible to all */
294 set_bit(MCC_TAG_STATE_TIMEOUT
,
295 &phba
->ctrl
.ptag_state
[tag
].tag_state
);
297 beiscsi_log(phba
, KERN_ERR
,
298 BEISCSI_LOG_INIT
| BEISCSI_LOG_EH
|
300 "BC_%d : MBX Cmd Completion timed out\n");
304 rc
= __beiscsi_mcc_compl_status(phba
, tag
, wrb
, mbx_cmd_mem
);
306 free_mcc_wrb(&phba
->ctrl
, tag
);
311 * beiscsi_process_mbox_compl()- Check the MBX completion status
312 * @ctrl: Function specific MBX data structure
313 * @compl: Completion status of MBX Command
315 * Check for the MBX completion status when BMBX method used
321 static int beiscsi_process_mbox_compl(struct be_ctrl_info
*ctrl
,
322 struct be_mcc_compl
*compl)
324 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
325 struct beiscsi_hba
*phba
= pci_get_drvdata(ctrl
->pdev
);
326 struct be_cmd_req_hdr
*hdr
= embedded_payload(wrb
);
327 u16 compl_status
, extd_status
;
330 * To check if valid bit is set, check the entire word as we don't know
331 * the endianness of the data (old entry is host endian while a new
332 * entry is little endian)
335 beiscsi_log(phba
, KERN_ERR
,
336 BEISCSI_LOG_CONFIG
| BEISCSI_LOG_MBOX
,
337 "BC_%d : BMBX busy, no completion\n");
340 compl->flags
= le32_to_cpu(compl->flags
);
341 WARN_ON((compl->flags
& CQE_FLAGS_VALID_MASK
) == 0);
344 * Just swap the status to host endian;
345 * mcc tag is opaquely copied from mcc_wrb.
347 be_dws_le_to_cpu(compl, 4);
348 compl_status
= (compl->status
>> CQE_STATUS_COMPL_SHIFT
) &
349 CQE_STATUS_COMPL_MASK
;
350 extd_status
= (compl->status
>> CQE_STATUS_EXTD_SHIFT
) &
351 CQE_STATUS_EXTD_MASK
;
352 /* Need to reset the entire word that houses the valid bit */
355 if (compl_status
== MCC_STATUS_SUCCESS
)
358 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_CONFIG
| BEISCSI_LOG_MBOX
,
359 "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
360 hdr
->subsystem
, hdr
->opcode
, compl_status
, extd_status
);
364 static void beiscsi_process_async_link(struct beiscsi_hba
*phba
,
365 struct be_mcc_compl
*compl)
367 struct be_async_event_link_state
*evt
;
369 evt
= (struct be_async_event_link_state
*)compl;
371 phba
->port_speed
= evt
->port_speed
;
373 * Check logical link status in ASYNC event.
374 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
376 if (evt
->port_link_status
& BE_ASYNC_LINK_UP_MASK
) {
377 set_bit(BEISCSI_HBA_LINK_UP
, &phba
->state
);
378 if (test_bit(BEISCSI_HBA_BOOT_FOUND
, &phba
->state
))
379 beiscsi_start_boot_work(phba
, BE_BOOT_INVALID_SHANDLE
);
380 __beiscsi_log(phba
, KERN_ERR
,
381 "BC_%d : Link Up on Port %d tag 0x%x\n",
382 evt
->physical_port
, evt
->event_tag
);
384 clear_bit(BEISCSI_HBA_LINK_UP
, &phba
->state
);
385 __beiscsi_log(phba
, KERN_ERR
,
386 "BC_%d : Link Down on Port %d tag 0x%x\n",
387 evt
->physical_port
, evt
->event_tag
);
388 iscsi_host_for_each_session(phba
->shost
,
389 beiscsi_session_fail
);
/* Human-readable messages indexed by the optic misconfiguration state
 * reported in the SLI MISCONFIGURED async event (see
 * beiscsi_process_async_sli).
 */
static char *beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};
402 static void beiscsi_process_async_sli(struct beiscsi_hba
*phba
,
403 struct be_mcc_compl
*compl)
405 struct be_async_event_sli
*async_sli
;
406 u8 evt_type
, state
, old_state
, le
;
407 char *sev
= KERN_WARNING
;
410 evt_type
= compl->flags
>> ASYNC_TRAILER_EVENT_TYPE_SHIFT
;
411 evt_type
&= ASYNC_TRAILER_EVENT_TYPE_MASK
;
413 /* processing only MISCONFIGURED physical port event */
414 if (evt_type
!= ASYNC_SLI_EVENT_TYPE_MISCONFIGURED
)
417 async_sli
= (struct be_async_event_sli
*)compl;
418 state
= async_sli
->event_data1
>>
419 (phba
->fw_config
.phys_port
* 8) & 0xff;
420 le
= async_sli
->event_data2
>>
421 (phba
->fw_config
.phys_port
* 8) & 0xff;
423 old_state
= phba
->optic_state
;
424 phba
->optic_state
= state
;
426 if (state
>= ARRAY_SIZE(beiscsi_port_misconf_event_msg
)) {
427 /* fw is reporting a state we don't know, log and return */
428 __beiscsi_log(phba
, KERN_ERR
,
429 "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
430 phba
->port_name
, async_sli
->event_data1
);
434 if (ASYNC_SLI_LINK_EFFECT_VALID(le
)) {
435 /* log link effect for unqualified-4, uncertified-5 optics */
437 msg
= (ASYNC_SLI_LINK_EFFECT_STATE(le
)) ?
438 " Link is non-operational." :
439 " Link is operational.";
441 if (ASYNC_SLI_LINK_EFFECT_SEV(le
) == 1)
444 if (ASYNC_SLI_LINK_EFFECT_SEV(le
) == 2)
448 if (old_state
!= phba
->optic_state
)
449 __beiscsi_log(phba
, sev
, "BC_%d : Port %c: %s%s\n",
451 beiscsi_port_misconf_event_msg
[state
],
455 void beiscsi_process_async_event(struct beiscsi_hba
*phba
,
456 struct be_mcc_compl
*compl)
458 char *sev
= KERN_INFO
;
461 /* interpret flags as an async trailer */
462 evt_code
= compl->flags
>> ASYNC_TRAILER_EVENT_CODE_SHIFT
;
463 evt_code
&= ASYNC_TRAILER_EVENT_CODE_MASK
;
465 case ASYNC_EVENT_CODE_LINK_STATE
:
466 beiscsi_process_async_link(phba
, compl);
468 case ASYNC_EVENT_CODE_ISCSI
:
469 if (test_bit(BEISCSI_HBA_BOOT_FOUND
, &phba
->state
))
470 beiscsi_start_boot_work(phba
, BE_BOOT_INVALID_SHANDLE
);
473 case ASYNC_EVENT_CODE_SLI
:
474 beiscsi_process_async_sli(phba
, compl);
477 /* event not registered */
481 beiscsi_log(phba
, sev
, BEISCSI_LOG_CONFIG
| BEISCSI_LOG_MBOX
,
482 "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
483 evt_code
, compl->status
, compl->flags
);
486 int beiscsi_process_mcc_compl(struct be_ctrl_info
*ctrl
,
487 struct be_mcc_compl
*compl)
489 struct beiscsi_hba
*phba
= pci_get_drvdata(ctrl
->pdev
);
490 u16 compl_status
, extd_status
;
491 struct be_dma_mem
*tag_mem
;
492 unsigned int tag
, wrb_idx
;
494 be_dws_le_to_cpu(compl, 4);
495 tag
= (compl->tag0
& MCC_Q_CMD_TAG_MASK
);
496 wrb_idx
= (compl->tag0
& CQE_STATUS_WRB_MASK
) >> CQE_STATUS_WRB_SHIFT
;
498 if (!test_bit(MCC_TAG_STATE_RUNNING
,
499 &ctrl
->ptag_state
[tag
].tag_state
)) {
500 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_MBOX
|
501 BEISCSI_LOG_INIT
| BEISCSI_LOG_CONFIG
,
502 "BC_%d : MBX cmd completed but not posted\n");
506 /* end MCC with this tag */
507 clear_bit(MCC_TAG_STATE_RUNNING
, &ctrl
->ptag_state
[tag
].tag_state
);
509 if (test_bit(MCC_TAG_STATE_TIMEOUT
, &ctrl
->ptag_state
[tag
].tag_state
)) {
510 beiscsi_log(phba
, KERN_WARNING
,
511 BEISCSI_LOG_MBOX
| BEISCSI_LOG_INIT
|
513 "BC_%d : MBX Completion for timeout Command from FW\n");
515 * Check for the size before freeing resource.
516 * Only for non-embedded cmd, PCI resource is allocated.
518 tag_mem
= &ctrl
->ptag_state
[tag
].tag_mem_state
;
520 pci_free_consistent(ctrl
->pdev
, tag_mem
->size
,
521 tag_mem
->va
, tag_mem
->dma
);
524 free_mcc_wrb(ctrl
, tag
);
528 compl_status
= (compl->status
>> CQE_STATUS_COMPL_SHIFT
) &
529 CQE_STATUS_COMPL_MASK
;
530 extd_status
= (compl->status
>> CQE_STATUS_EXTD_SHIFT
) &
531 CQE_STATUS_EXTD_MASK
;
532 /* The ctrl.mcc_tag_status[tag] is filled with
533 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
534 * [7:0] = compl_status
536 ctrl
->mcc_tag_status
[tag
] = CQE_VALID_MASK
;
537 ctrl
->mcc_tag_status
[tag
] |= (wrb_idx
<< CQE_STATUS_WRB_SHIFT
);
538 ctrl
->mcc_tag_status
[tag
] |= (extd_status
<< CQE_STATUS_ADDL_SHIFT
) &
539 CQE_STATUS_ADDL_MASK
;
540 ctrl
->mcc_tag_status
[tag
] |= (compl_status
& CQE_STATUS_MASK
);
542 if (test_bit(MCC_TAG_STATE_ASYNC
, &ctrl
->ptag_state
[tag
].tag_state
)) {
543 if (ctrl
->ptag_state
[tag
].cbfn
)
544 ctrl
->ptag_state
[tag
].cbfn(phba
, tag
);
546 __beiscsi_log(phba
, KERN_ERR
,
547 "BC_%d : MBX ASYNC command with no callback\n");
548 free_mcc_wrb(ctrl
, tag
);
552 if (test_bit(MCC_TAG_STATE_IGNORE
, &ctrl
->ptag_state
[tag
].tag_state
)) {
553 /* just check completion status and free wrb */
554 __beiscsi_mcc_compl_status(phba
, tag
, NULL
, NULL
);
555 free_mcc_wrb(ctrl
, tag
);
559 wake_up_interruptible(&ctrl
->mcc_wait
[tag
]);
563 void be_mcc_notify(struct beiscsi_hba
*phba
, unsigned int tag
)
565 struct be_queue_info
*mccq
= &phba
->ctrl
.mcc_obj
.q
;
568 set_bit(MCC_TAG_STATE_RUNNING
, &phba
->ctrl
.ptag_state
[tag
].tag_state
);
569 val
|= mccq
->id
& DB_MCCQ_RING_ID_MASK
;
570 val
|= 1 << DB_MCCQ_NUM_POSTED_SHIFT
;
571 /* make request available for DMA */
573 iowrite32(val
, phba
->db_va
+ DB_MCCQ_OFFSET
);
577 * be_mbox_db_ready_poll()- Check ready status
578 * @ctrl: Function specific MBX data structure
580 * Check for the ready status of FW to send BMBX
581 * commands to adapter.
587 static int be_mbox_db_ready_poll(struct be_ctrl_info
*ctrl
)
589 /* wait 30s for generic non-flash MBOX operation */
590 #define BEISCSI_MBX_RDY_BIT_TIMEOUT 30000
591 void __iomem
*db
= ctrl
->db
+ MPU_MAILBOX_DB_OFFSET
;
592 struct beiscsi_hba
*phba
= pci_get_drvdata(ctrl
->pdev
);
593 unsigned long timeout
;
597 * This BMBX busy wait path is used during init only.
598 * For the commands executed during init, 5s should suffice.
600 timeout
= jiffies
+ msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT
);
602 if (beiscsi_hba_in_error(phba
))
605 ready
= ioread32(db
);
606 if (ready
== 0xffffffff)
609 ready
&= MPU_MAILBOX_DB_RDY_MASK
;
613 if (time_after(jiffies
, timeout
))
615 /* 1ms sleep is enough in most cases */
616 schedule_timeout_uninterruptible(msecs_to_jiffies(1));
619 beiscsi_log(phba
, KERN_ERR
,
620 BEISCSI_LOG_CONFIG
| BEISCSI_LOG_MBOX
,
621 "BC_%d : FW Timed Out\n");
622 set_bit(BEISCSI_HBA_FW_TIMEOUT
, &phba
->state
);
627 * be_mbox_notify: Notify adapter of new BMBX command
628 * @ctrl: Function specific MBX data structure
630 * Ring doorbell to inform adapter of a BMBX command
637 static int be_mbox_notify(struct be_ctrl_info
*ctrl
)
641 void __iomem
*db
= ctrl
->db
+ MPU_MAILBOX_DB_OFFSET
;
642 struct be_dma_mem
*mbox_mem
= &ctrl
->mbox_mem
;
643 struct be_mcc_mailbox
*mbox
= mbox_mem
->va
;
645 status
= be_mbox_db_ready_poll(ctrl
);
649 val
&= ~MPU_MAILBOX_DB_RDY_MASK
;
650 val
|= MPU_MAILBOX_DB_HI_MASK
;
651 val
|= (upper_32_bits(mbox_mem
->dma
) >> 2) << 2;
654 status
= be_mbox_db_ready_poll(ctrl
);
659 val
&= ~MPU_MAILBOX_DB_RDY_MASK
;
660 val
&= ~MPU_MAILBOX_DB_HI_MASK
;
661 val
|= (u32
) (mbox_mem
->dma
>> 4) << 2;
664 status
= be_mbox_db_ready_poll(ctrl
);
668 /* RDY is set; small delay before CQE read. */
671 status
= beiscsi_process_mbox_compl(ctrl
, &mbox
->compl);
675 void be_wrb_hdr_prepare(struct be_mcc_wrb
*wrb
, int payload_len
,
676 bool embedded
, u8 sge_cnt
)
679 wrb
->embedded
|= MCC_WRB_EMBEDDED_MASK
;
681 wrb
->embedded
|= (sge_cnt
& MCC_WRB_SGE_CNT_MASK
) <<
682 MCC_WRB_SGE_CNT_SHIFT
;
683 wrb
->payload_length
= payload_len
;
684 be_dws_cpu_to_le(wrb
, 8);
687 void be_cmd_hdr_prepare(struct be_cmd_req_hdr
*req_hdr
,
688 u8 subsystem
, u8 opcode
, int cmd_len
)
690 req_hdr
->opcode
= opcode
;
691 req_hdr
->subsystem
= subsystem
;
692 req_hdr
->request_length
= cpu_to_le32(cmd_len
- sizeof(*req_hdr
));
693 req_hdr
->timeout
= BEISCSI_FW_MBX_TIMEOUT
;
696 static void be_cmd_page_addrs_prepare(struct phys_addr
*pages
, u32 max_pages
,
697 struct be_dma_mem
*mem
)
700 u64 dma
= (u64
) mem
->dma
;
702 buf_pages
= min(PAGES_4K_SPANNED(mem
->va
, mem
->size
), max_pages
);
703 for (i
= 0; i
< buf_pages
; i
++) {
704 pages
[i
].lo
= cpu_to_le32(dma
& 0xFFFFFFFF);
705 pages
[i
].hi
= cpu_to_le32(upper_32_bits(dma
));
710 static u32
eq_delay_to_mult(u32 usec_delay
)
712 #define MAX_INTR_RATE 651042
713 const u32 round
= 10;
719 u32 interrupt_rate
= 1000000 / usec_delay
;
720 if (interrupt_rate
== 0)
723 multiplier
= (MAX_INTR_RATE
- interrupt_rate
) * round
;
724 multiplier
/= interrupt_rate
;
725 multiplier
= (multiplier
+ round
/ 2) / round
;
726 multiplier
= min(multiplier
, (u32
) 1023);
732 struct be_mcc_wrb
*wrb_from_mbox(struct be_dma_mem
*mbox_mem
)
734 return &((struct be_mcc_mailbox
*)(mbox_mem
->va
))->wrb
;
737 int beiscsi_cmd_eq_create(struct be_ctrl_info
*ctrl
,
738 struct be_queue_info
*eq
, int eq_delay
)
740 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
741 struct be_cmd_req_eq_create
*req
= embedded_payload(wrb
);
742 struct be_cmd_resp_eq_create
*resp
= embedded_payload(wrb
);
743 struct be_dma_mem
*q_mem
= &eq
->dma_mem
;
746 mutex_lock(&ctrl
->mbox_lock
);
747 memset(wrb
, 0, sizeof(*wrb
));
749 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
751 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
752 OPCODE_COMMON_EQ_CREATE
, sizeof(*req
));
754 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
756 AMAP_SET_BITS(struct amap_eq_context
, func
, req
->context
,
757 PCI_FUNC(ctrl
->pdev
->devfn
));
758 AMAP_SET_BITS(struct amap_eq_context
, valid
, req
->context
, 1);
759 AMAP_SET_BITS(struct amap_eq_context
, size
, req
->context
, 0);
760 AMAP_SET_BITS(struct amap_eq_context
, count
, req
->context
,
761 __ilog2_u32(eq
->len
/ 256));
762 AMAP_SET_BITS(struct amap_eq_context
, delaymult
, req
->context
,
763 eq_delay_to_mult(eq_delay
));
764 be_dws_cpu_to_le(req
->context
, sizeof(req
->context
));
766 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
768 status
= be_mbox_notify(ctrl
);
770 eq
->id
= le16_to_cpu(resp
->eq_id
);
773 mutex_unlock(&ctrl
->mbox_lock
);
777 int beiscsi_cmd_cq_create(struct be_ctrl_info
*ctrl
,
778 struct be_queue_info
*cq
, struct be_queue_info
*eq
,
779 bool sol_evts
, bool no_delay
, int coalesce_wm
)
781 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
782 struct be_cmd_req_cq_create
*req
= embedded_payload(wrb
);
783 struct be_cmd_resp_cq_create
*resp
= embedded_payload(wrb
);
784 struct beiscsi_hba
*phba
= pci_get_drvdata(ctrl
->pdev
);
785 struct be_dma_mem
*q_mem
= &cq
->dma_mem
;
786 void *ctxt
= &req
->context
;
789 mutex_lock(&ctrl
->mbox_lock
);
790 memset(wrb
, 0, sizeof(*wrb
));
792 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
794 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
795 OPCODE_COMMON_CQ_CREATE
, sizeof(*req
));
797 req
->num_pages
= cpu_to_le16(PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
));
798 if (is_chip_be2_be3r(phba
)) {
799 AMAP_SET_BITS(struct amap_cq_context
, coalescwm
,
801 AMAP_SET_BITS(struct amap_cq_context
, nodelay
, ctxt
, no_delay
);
802 AMAP_SET_BITS(struct amap_cq_context
, count
, ctxt
,
803 __ilog2_u32(cq
->len
/ 256));
804 AMAP_SET_BITS(struct amap_cq_context
, valid
, ctxt
, 1);
805 AMAP_SET_BITS(struct amap_cq_context
, solevent
, ctxt
, sol_evts
);
806 AMAP_SET_BITS(struct amap_cq_context
, eventable
, ctxt
, 1);
807 AMAP_SET_BITS(struct amap_cq_context
, eqid
, ctxt
, eq
->id
);
808 AMAP_SET_BITS(struct amap_cq_context
, armed
, ctxt
, 1);
809 AMAP_SET_BITS(struct amap_cq_context
, func
, ctxt
,
810 PCI_FUNC(ctrl
->pdev
->devfn
));
812 req
->hdr
.version
= MBX_CMD_VER2
;
814 AMAP_SET_BITS(struct amap_cq_context_v2
, coalescwm
,
816 AMAP_SET_BITS(struct amap_cq_context_v2
, nodelay
,
818 AMAP_SET_BITS(struct amap_cq_context_v2
, count
, ctxt
,
819 __ilog2_u32(cq
->len
/ 256));
820 AMAP_SET_BITS(struct amap_cq_context_v2
, valid
, ctxt
, 1);
821 AMAP_SET_BITS(struct amap_cq_context_v2
, eventable
, ctxt
, 1);
822 AMAP_SET_BITS(struct amap_cq_context_v2
, eqid
, ctxt
, eq
->id
);
823 AMAP_SET_BITS(struct amap_cq_context_v2
, armed
, ctxt
, 1);
826 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
828 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
830 status
= be_mbox_notify(ctrl
);
832 cq
->id
= le16_to_cpu(resp
->cq_id
);
835 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
836 "BC_%d : In be_cmd_cq_create, status=ox%08x\n",
839 mutex_unlock(&ctrl
->mbox_lock
);
844 static u32
be_encoded_q_len(int q_len
)
846 u32 len_encoded
= fls(q_len
); /* log2(len) + 1 */
847 if (len_encoded
== 16)
852 int beiscsi_cmd_mccq_create(struct beiscsi_hba
*phba
,
853 struct be_queue_info
*mccq
,
854 struct be_queue_info
*cq
)
856 struct be_mcc_wrb
*wrb
;
857 struct be_cmd_req_mcc_create_ext
*req
;
858 struct be_dma_mem
*q_mem
= &mccq
->dma_mem
;
859 struct be_ctrl_info
*ctrl
;
863 mutex_lock(&phba
->ctrl
.mbox_lock
);
865 wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
866 memset(wrb
, 0, sizeof(*wrb
));
867 req
= embedded_payload(wrb
);
868 ctxt
= &req
->context
;
870 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
872 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
873 OPCODE_COMMON_MCC_CREATE_EXT
, sizeof(*req
));
875 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
876 req
->async_evt_bitmap
= 1 << ASYNC_EVENT_CODE_LINK_STATE
;
877 req
->async_evt_bitmap
|= 1 << ASYNC_EVENT_CODE_ISCSI
;
878 req
->async_evt_bitmap
|= 1 << ASYNC_EVENT_CODE_SLI
;
880 AMAP_SET_BITS(struct amap_mcc_context
, fid
, ctxt
,
881 PCI_FUNC(phba
->pcidev
->devfn
));
882 AMAP_SET_BITS(struct amap_mcc_context
, valid
, ctxt
, 1);
883 AMAP_SET_BITS(struct amap_mcc_context
, ring_size
, ctxt
,
884 be_encoded_q_len(mccq
->len
));
885 AMAP_SET_BITS(struct amap_mcc_context
, cq_id
, ctxt
, cq
->id
);
887 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
889 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
891 status
= be_mbox_notify(ctrl
);
893 struct be_cmd_resp_mcc_create
*resp
= embedded_payload(wrb
);
894 mccq
->id
= le16_to_cpu(resp
->id
);
895 mccq
->created
= true;
897 mutex_unlock(&phba
->ctrl
.mbox_lock
);
902 int beiscsi_cmd_q_destroy(struct be_ctrl_info
*ctrl
, struct be_queue_info
*q
,
905 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
906 struct be_cmd_req_q_destroy
*req
= embedded_payload(wrb
);
907 struct beiscsi_hba
*phba
= pci_get_drvdata(ctrl
->pdev
);
908 u8 subsys
= 0, opcode
= 0;
911 beiscsi_log(phba
, KERN_INFO
, BEISCSI_LOG_INIT
,
912 "BC_%d : In beiscsi_cmd_q_destroy "
913 "queue_type : %d\n", queue_type
);
915 mutex_lock(&ctrl
->mbox_lock
);
916 memset(wrb
, 0, sizeof(*wrb
));
917 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
919 switch (queue_type
) {
921 subsys
= CMD_SUBSYSTEM_COMMON
;
922 opcode
= OPCODE_COMMON_EQ_DESTROY
;
925 subsys
= CMD_SUBSYSTEM_COMMON
;
926 opcode
= OPCODE_COMMON_CQ_DESTROY
;
929 subsys
= CMD_SUBSYSTEM_COMMON
;
930 opcode
= OPCODE_COMMON_MCC_DESTROY
;
933 subsys
= CMD_SUBSYSTEM_ISCSI
;
934 opcode
= OPCODE_COMMON_ISCSI_WRBQ_DESTROY
;
937 subsys
= CMD_SUBSYSTEM_ISCSI
;
938 opcode
= OPCODE_COMMON_ISCSI_DEFQ_DESTROY
;
941 subsys
= CMD_SUBSYSTEM_ISCSI
;
942 opcode
= OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES
;
945 mutex_unlock(&ctrl
->mbox_lock
);
949 be_cmd_hdr_prepare(&req
->hdr
, subsys
, opcode
, sizeof(*req
));
950 if (queue_type
!= QTYPE_SGL
)
951 req
->id
= cpu_to_le16(q
->id
);
953 status
= be_mbox_notify(ctrl
);
955 mutex_unlock(&ctrl
->mbox_lock
);
960 * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
961 * @ctrl: ptr to ctrl_info
962 * @cq: Completion Queue
965 * @entry_size: size of each entry in DEFQ
966 * @is_header: Header or Data DEFQ
967 * @ulp_num: Bind to which ULP
969 * Create HDR/Data DEFQ for the passed ULP. Unsol PDU are posted
970 * on this queue by the FW
974 * Failure: Non-Zero Value
977 int be_cmd_create_default_pdu_queue(struct be_ctrl_info
*ctrl
,
978 struct be_queue_info
*cq
,
979 struct be_queue_info
*dq
, int length
,
980 int entry_size
, uint8_t is_header
,
983 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
984 struct be_defq_create_req
*req
= embedded_payload(wrb
);
985 struct be_dma_mem
*q_mem
= &dq
->dma_mem
;
986 struct beiscsi_hba
*phba
= pci_get_drvdata(ctrl
->pdev
);
987 void *ctxt
= &req
->context
;
990 mutex_lock(&ctrl
->mbox_lock
);
991 memset(wrb
, 0, sizeof(*wrb
));
993 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
995 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ISCSI
,
996 OPCODE_COMMON_ISCSI_DEFQ_CREATE
, sizeof(*req
));
998 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
999 if (phba
->fw_config
.dual_ulp_aware
) {
1000 req
->ulp_num
= ulp_num
;
1001 req
->dua_feature
|= (1 << BEISCSI_DUAL_ULP_AWARE_BIT
);
1002 req
->dua_feature
|= (1 << BEISCSI_BIND_Q_TO_ULP_BIT
);
1005 if (is_chip_be2_be3r(phba
)) {
1006 AMAP_SET_BITS(struct amap_be_default_pdu_context
,
1008 AMAP_SET_BITS(struct amap_be_default_pdu_context
,
1009 rx_pdid_valid
, ctxt
, 1);
1010 AMAP_SET_BITS(struct amap_be_default_pdu_context
,
1011 pci_func_id
, ctxt
, PCI_FUNC(ctrl
->pdev
->devfn
));
1012 AMAP_SET_BITS(struct amap_be_default_pdu_context
,
1014 be_encoded_q_len(length
/
1015 sizeof(struct phys_addr
)));
1016 AMAP_SET_BITS(struct amap_be_default_pdu_context
,
1017 default_buffer_size
, ctxt
, entry_size
);
1018 AMAP_SET_BITS(struct amap_be_default_pdu_context
,
1019 cq_id_recv
, ctxt
, cq
->id
);
1021 AMAP_SET_BITS(struct amap_default_pdu_context_ext
,
1023 AMAP_SET_BITS(struct amap_default_pdu_context_ext
,
1024 rx_pdid_valid
, ctxt
, 1);
1025 AMAP_SET_BITS(struct amap_default_pdu_context_ext
,
1027 be_encoded_q_len(length
/
1028 sizeof(struct phys_addr
)));
1029 AMAP_SET_BITS(struct amap_default_pdu_context_ext
,
1030 default_buffer_size
, ctxt
, entry_size
);
1031 AMAP_SET_BITS(struct amap_default_pdu_context_ext
,
1032 cq_id_recv
, ctxt
, cq
->id
);
1035 be_dws_cpu_to_le(ctxt
, sizeof(req
->context
));
1037 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1039 status
= be_mbox_notify(ctrl
);
1041 struct be_ring
*defq_ring
;
1042 struct be_defq_create_resp
*resp
= embedded_payload(wrb
);
1044 dq
->id
= le16_to_cpu(resp
->id
);
1047 defq_ring
= &phba
->phwi_ctrlr
->default_pdu_hdr
[ulp_num
];
1049 defq_ring
= &phba
->phwi_ctrlr
->
1050 default_pdu_data
[ulp_num
];
1052 defq_ring
->id
= dq
->id
;
1054 if (!phba
->fw_config
.dual_ulp_aware
) {
1055 defq_ring
->ulp_num
= BEISCSI_ULP0
;
1056 defq_ring
->doorbell_offset
= DB_RXULP0_OFFSET
;
1058 defq_ring
->ulp_num
= resp
->ulp_num
;
1059 defq_ring
->doorbell_offset
= resp
->doorbell_offset
;
1062 mutex_unlock(&ctrl
->mbox_lock
);
1068 * be_cmd_wrbq_create()- Create WRBQ
1069 * @ctrl: ptr to ctrl_info
1070 * @q_mem: memory details for the queue
1072 * @pwrb_context: ptr to wrb_context
1073 * @ulp_num: ULP on which the WRBQ is to be created
1075 * Create WRBQ on the passed ULP_NUM.
1078 int be_cmd_wrbq_create(struct be_ctrl_info
*ctrl
,
1079 struct be_dma_mem
*q_mem
,
1080 struct be_queue_info
*wrbq
,
1081 struct hwi_wrb_context
*pwrb_context
,
1084 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1085 struct be_wrbq_create_req
*req
= embedded_payload(wrb
);
1086 struct be_wrbq_create_resp
*resp
= embedded_payload(wrb
);
1087 struct beiscsi_hba
*phba
= pci_get_drvdata(ctrl
->pdev
);
1090 mutex_lock(&ctrl
->mbox_lock
);
1091 memset(wrb
, 0, sizeof(*wrb
));
1093 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
1095 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ISCSI
,
1096 OPCODE_COMMON_ISCSI_WRBQ_CREATE
, sizeof(*req
));
1097 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
1099 if (phba
->fw_config
.dual_ulp_aware
) {
1100 req
->ulp_num
= ulp_num
;
1101 req
->dua_feature
|= (1 << BEISCSI_DUAL_ULP_AWARE_BIT
);
1102 req
->dua_feature
|= (1 << BEISCSI_BIND_Q_TO_ULP_BIT
);
1105 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1107 status
= be_mbox_notify(ctrl
);
1109 wrbq
->id
= le16_to_cpu(resp
->cid
);
1110 wrbq
->created
= true;
1112 pwrb_context
->cid
= wrbq
->id
;
1113 if (!phba
->fw_config
.dual_ulp_aware
) {
1114 pwrb_context
->doorbell_offset
= DB_TXULP0_OFFSET
;
1115 pwrb_context
->ulp_num
= BEISCSI_ULP0
;
1117 pwrb_context
->ulp_num
= resp
->ulp_num
;
1118 pwrb_context
->doorbell_offset
= resp
->doorbell_offset
;
1121 mutex_unlock(&ctrl
->mbox_lock
);
1125 int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info
*ctrl
,
1126 struct be_dma_mem
*q_mem
)
1128 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1129 struct be_post_template_pages_req
*req
= embedded_payload(wrb
);
1132 mutex_lock(&ctrl
->mbox_lock
);
1134 memset(wrb
, 0, sizeof(*wrb
));
1135 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
1136 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1137 OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS
,
1140 req
->num_pages
= PAGES_4K_SPANNED(q_mem
->va
, q_mem
->size
);
1141 req
->type
= BEISCSI_TEMPLATE_HDR_TYPE_ISCSI
;
1142 be_cmd_page_addrs_prepare(req
->pages
, ARRAY_SIZE(req
->pages
), q_mem
);
1144 status
= be_mbox_notify(ctrl
);
1145 mutex_unlock(&ctrl
->mbox_lock
);
1149 int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info
*ctrl
)
1151 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1152 struct be_remove_template_pages_req
*req
= embedded_payload(wrb
);
1155 mutex_lock(&ctrl
->mbox_lock
);
1157 memset(wrb
, 0, sizeof(*wrb
));
1158 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
1159 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1160 OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS
,
1163 req
->type
= BEISCSI_TEMPLATE_HDR_TYPE_ISCSI
;
1165 status
= be_mbox_notify(ctrl
);
1166 mutex_unlock(&ctrl
->mbox_lock
);
1170 int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info
*ctrl
,
1171 struct be_dma_mem
*q_mem
,
1172 u32 page_offset
, u32 num_pages
)
1174 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1175 struct be_post_sgl_pages_req
*req
= embedded_payload(wrb
);
1176 struct beiscsi_hba
*phba
= pci_get_drvdata(ctrl
->pdev
);
1178 unsigned int curr_pages
;
1179 u32 internal_page_offset
= 0;
1180 u32 temp_num_pages
= num_pages
;
1182 if (num_pages
== 0xff)
1185 mutex_lock(&ctrl
->mbox_lock
);
1187 memset(wrb
, 0, sizeof(*wrb
));
1188 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
1189 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ISCSI
,
1190 OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES
,
1192 curr_pages
= BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req
,
1194 req
->num_pages
= min(num_pages
, curr_pages
);
1195 req
->page_offset
= page_offset
;
1196 be_cmd_page_addrs_prepare(req
->pages
, req
->num_pages
, q_mem
);
1197 q_mem
->dma
= q_mem
->dma
+ (req
->num_pages
* PAGE_SIZE
);
1198 internal_page_offset
+= req
->num_pages
;
1199 page_offset
+= req
->num_pages
;
1200 num_pages
-= req
->num_pages
;
1202 if (temp_num_pages
== 0xff)
1203 req
->num_pages
= temp_num_pages
;
1205 status
= be_mbox_notify(ctrl
);
1207 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1208 "BC_%d : FW CMD to map iscsi frags failed.\n");
1212 } while (num_pages
> 0);
1214 mutex_unlock(&ctrl
->mbox_lock
);
1216 beiscsi_cmd_q_destroy(ctrl
, NULL
, QTYPE_SGL
);
1221 * be_cmd_set_vlan()- Configure VLAN paramters on the adapter
1222 * @phba: device priv structure instance
1223 * @vlan_tag: TAG to be set
1225 * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
1228 * TAG for the MBX Cmd
1230 int be_cmd_set_vlan(struct beiscsi_hba
*phba
,
1234 struct be_mcc_wrb
*wrb
;
1235 struct be_cmd_set_vlan_req
*req
;
1236 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
1238 if (mutex_lock_interruptible(&ctrl
->mbox_lock
))
1240 wrb
= alloc_mcc_wrb(phba
, &tag
);
1242 mutex_unlock(&ctrl
->mbox_lock
);
1246 req
= embedded_payload(wrb
);
1247 be_wrb_hdr_prepare(wrb
, sizeof(*wrb
), true, 0);
1248 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ISCSI
,
1249 OPCODE_COMMON_ISCSI_NTWK_SET_VLAN
,
1252 req
->interface_hndl
= phba
->interface_handle
;
1253 req
->vlan_priority
= vlan_tag
;
1255 be_mcc_notify(phba
, tag
);
1256 mutex_unlock(&ctrl
->mbox_lock
);
1261 int beiscsi_check_supported_fw(struct be_ctrl_info
*ctrl
,
1262 struct beiscsi_hba
*phba
)
1264 struct be_dma_mem nonemb_cmd
;
1265 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1266 struct be_mgmt_controller_attributes
*req
;
1267 struct be_sge
*sge
= nonembedded_sgl(wrb
);
1270 nonemb_cmd
.va
= pci_alloc_consistent(ctrl
->pdev
,
1271 sizeof(struct be_mgmt_controller_attributes
),
1273 if (nonemb_cmd
.va
== NULL
) {
1274 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1275 "BG_%d : pci_alloc_consistent failed in %s\n",
1279 nonemb_cmd
.size
= sizeof(struct be_mgmt_controller_attributes
);
1280 req
= nonemb_cmd
.va
;
1281 memset(req
, 0, sizeof(*req
));
1282 mutex_lock(&ctrl
->mbox_lock
);
1283 memset(wrb
, 0, sizeof(*wrb
));
1284 be_wrb_hdr_prepare(wrb
, sizeof(*req
), false, 1);
1285 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1286 OPCODE_COMMON_GET_CNTL_ATTRIBUTES
, sizeof(*req
));
1287 sge
->pa_hi
= cpu_to_le32(upper_32_bits(nonemb_cmd
.dma
));
1288 sge
->pa_lo
= cpu_to_le32(nonemb_cmd
.dma
& 0xFFFFFFFF);
1289 sge
->len
= cpu_to_le32(nonemb_cmd
.size
);
1290 status
= be_mbox_notify(ctrl
);
1292 struct be_mgmt_controller_attributes_resp
*resp
= nonemb_cmd
.va
;
1294 beiscsi_log(phba
, KERN_INFO
, BEISCSI_LOG_INIT
,
1295 "BG_%d : Firmware Version of CMD : %s\n"
1296 "Firmware Version is : %s\n"
1297 "Developer Build, not performing version check...\n",
1298 resp
->params
.hba_attribs
1299 .flashrom_version_string
,
1300 resp
->params
.hba_attribs
.
1301 firmware_version_string
);
1303 phba
->fw_config
.iscsi_features
=
1304 resp
->params
.hba_attribs
.iscsi_features
;
1305 beiscsi_log(phba
, KERN_INFO
, BEISCSI_LOG_INIT
,
1306 "BM_%d : phba->fw_config.iscsi_features = %d\n",
1307 phba
->fw_config
.iscsi_features
);
1308 memcpy(phba
->fw_ver_str
, resp
->params
.hba_attribs
.
1309 firmware_version_string
, BEISCSI_VER_STRLEN
);
1311 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1312 "BG_%d : Failed in beiscsi_check_supported_fw\n");
1313 mutex_unlock(&ctrl
->mbox_lock
);
1315 pci_free_consistent(ctrl
->pdev
, nonemb_cmd
.size
,
1316 nonemb_cmd
.va
, nonemb_cmd
.dma
);
1322 * beiscsi_get_fw_config()- Get the FW config for the function
1323 * @ctrl: ptr to Ctrl Info
1324 * @phba: ptr to the dev priv structure
1326 * Get the FW config and resources available for the function.
1327 * The resources are created based on the count received here.
1331 * Failure: Non-Zero Value
1333 int beiscsi_get_fw_config(struct be_ctrl_info
*ctrl
,
1334 struct beiscsi_hba
*phba
)
1336 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1337 struct be_fw_cfg
*pfw_cfg
= embedded_payload(wrb
);
1338 uint32_t cid_count
, icd_count
;
1339 int status
= -EINVAL
;
1340 uint8_t ulp_num
= 0;
1342 mutex_lock(&ctrl
->mbox_lock
);
1343 memset(wrb
, 0, sizeof(*wrb
));
1344 be_wrb_hdr_prepare(wrb
, sizeof(*pfw_cfg
), true, 0);
1346 be_cmd_hdr_prepare(&pfw_cfg
->hdr
, CMD_SUBSYSTEM_COMMON
,
1347 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG
,
1348 EMBED_MBX_MAX_PAYLOAD_SIZE
);
1350 if (be_mbox_notify(ctrl
)) {
1351 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1352 "BG_%d : Failed in beiscsi_get_fw_config\n");
1356 /* FW response formats depend on port id */
1357 phba
->fw_config
.phys_port
= pfw_cfg
->phys_port
;
1358 if (phba
->fw_config
.phys_port
>= BEISCSI_PHYS_PORT_MAX
) {
1359 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1360 "BG_%d : invalid physical port id %d\n",
1361 phba
->fw_config
.phys_port
);
1365 /* populate and check FW config against min and max values */
1366 if (!is_chip_be2_be3r(phba
)) {
1367 phba
->fw_config
.eqid_count
= pfw_cfg
->eqid_count
;
1368 phba
->fw_config
.cqid_count
= pfw_cfg
->cqid_count
;
1369 if (phba
->fw_config
.eqid_count
== 0 ||
1370 phba
->fw_config
.eqid_count
> 2048) {
1371 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1372 "BG_%d : invalid EQ count %d\n",
1373 phba
->fw_config
.eqid_count
);
1376 if (phba
->fw_config
.cqid_count
== 0 ||
1377 phba
->fw_config
.cqid_count
> 4096) {
1378 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1379 "BG_%d : invalid CQ count %d\n",
1380 phba
->fw_config
.cqid_count
);
1383 beiscsi_log(phba
, KERN_INFO
, BEISCSI_LOG_INIT
,
1384 "BG_%d : EQ_Count : %d CQ_Count : %d\n",
1385 phba
->fw_config
.eqid_count
,
1386 phba
->fw_config
.cqid_count
);
1390 * Check on which all ULP iSCSI Protocol is loaded.
1391 * Set the Bit for those ULP. This set flag is used
1392 * at all places in the code to check on which ULP
1393 * iSCSi Protocol is loaded
1395 for (ulp_num
= 0; ulp_num
< BEISCSI_ULP_COUNT
; ulp_num
++) {
1396 if (pfw_cfg
->ulp
[ulp_num
].ulp_mode
&
1397 BEISCSI_ULP_ISCSI_INI_MODE
) {
1398 set_bit(ulp_num
, &phba
->fw_config
.ulp_supported
);
1400 /* Get the CID, ICD and Chain count for each ULP */
1401 phba
->fw_config
.iscsi_cid_start
[ulp_num
] =
1402 pfw_cfg
->ulp
[ulp_num
].sq_base
;
1403 phba
->fw_config
.iscsi_cid_count
[ulp_num
] =
1404 pfw_cfg
->ulp
[ulp_num
].sq_count
;
1406 phba
->fw_config
.iscsi_icd_start
[ulp_num
] =
1407 pfw_cfg
->ulp
[ulp_num
].icd_base
;
1408 phba
->fw_config
.iscsi_icd_count
[ulp_num
] =
1409 pfw_cfg
->ulp
[ulp_num
].icd_count
;
1411 phba
->fw_config
.iscsi_chain_start
[ulp_num
] =
1412 pfw_cfg
->chain_icd
[ulp_num
].chain_base
;
1413 phba
->fw_config
.iscsi_chain_count
[ulp_num
] =
1414 pfw_cfg
->chain_icd
[ulp_num
].chain_count
;
1416 beiscsi_log(phba
, KERN_INFO
, BEISCSI_LOG_INIT
,
1417 "BG_%d : Function loaded on ULP : %d\n"
1418 "\tiscsi_cid_count : %d\n"
1419 "\tiscsi_cid_start : %d\n"
1420 "\t iscsi_icd_count : %d\n"
1421 "\t iscsi_icd_start : %d\n",
1424 iscsi_cid_count
[ulp_num
],
1426 iscsi_cid_start
[ulp_num
],
1428 iscsi_icd_count
[ulp_num
],
1430 iscsi_icd_start
[ulp_num
]);
1434 if (phba
->fw_config
.ulp_supported
== 0) {
1435 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1436 "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
1437 pfw_cfg
->ulp
[BEISCSI_ULP0
].ulp_mode
,
1438 pfw_cfg
->ulp
[BEISCSI_ULP1
].ulp_mode
);
1443 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
1445 for (ulp_num
= 0; ulp_num
< BEISCSI_ULP_COUNT
; ulp_num
++)
1446 if (test_bit(ulp_num
, &phba
->fw_config
.ulp_supported
))
1448 icd_count
= phba
->fw_config
.iscsi_icd_count
[ulp_num
];
1449 if (icd_count
== 0 || icd_count
> 65536) {
1450 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1451 "BG_%d: invalid ICD count %d\n", icd_count
);
1455 cid_count
= BEISCSI_GET_CID_COUNT(phba
, BEISCSI_ULP0
) +
1456 BEISCSI_GET_CID_COUNT(phba
, BEISCSI_ULP1
);
1457 if (cid_count
== 0 || cid_count
> 4096) {
1458 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1459 "BG_%d: invalid CID count %d\n", cid_count
);
1464 * Check FW is dual ULP aware i.e. can handle either
1467 phba
->fw_config
.dual_ulp_aware
= (pfw_cfg
->function_mode
&
1468 BEISCSI_FUNC_DUA_MODE
);
1470 beiscsi_log(phba
, KERN_INFO
, BEISCSI_LOG_INIT
,
1471 "BG_%d : DUA Mode : 0x%x\n",
1472 phba
->fw_config
.dual_ulp_aware
);
1474 /* all set, continue using this FW config */
1477 mutex_unlock(&ctrl
->mbox_lock
);
1482 * beiscsi_get_port_name()- Get port name for the function
1483 * @ctrl: ptr to Ctrl Info
1484 * @phba: ptr to the dev priv structure
1486 * Get the alphanumeric character for port
1489 int beiscsi_get_port_name(struct be_ctrl_info
*ctrl
, struct beiscsi_hba
*phba
)
1492 struct be_mcc_wrb
*wrb
;
1493 struct be_cmd_get_port_name
*ioctl
;
1495 mutex_lock(&ctrl
->mbox_lock
);
1496 wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1497 memset(wrb
, 0, sizeof(*wrb
));
1498 ioctl
= embedded_payload(wrb
);
1500 be_wrb_hdr_prepare(wrb
, sizeof(*ioctl
), true, 0);
1501 be_cmd_hdr_prepare(&ioctl
->h
.req_hdr
, CMD_SUBSYSTEM_COMMON
,
1502 OPCODE_COMMON_GET_PORT_NAME
,
1503 EMBED_MBX_MAX_PAYLOAD_SIZE
);
1504 ret
= be_mbox_notify(ctrl
);
1505 phba
->port_name
= 0;
1507 phba
->port_name
= ioctl
->p
.resp
.port_names
>>
1508 (phba
->fw_config
.phys_port
* 8) & 0xff;
1510 beiscsi_log(phba
, KERN_INFO
, BEISCSI_LOG_INIT
,
1511 "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
1512 ret
, ioctl
->h
.resp_hdr
.status
);
1515 if (phba
->port_name
== 0)
1516 phba
->port_name
= '?';
1518 mutex_unlock(&ctrl
->mbox_lock
);
1522 int beiscsi_set_uer_feature(struct beiscsi_hba
*phba
)
1524 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
1525 struct be_cmd_set_features
*ioctl
;
1526 struct be_mcc_wrb
*wrb
;
1529 mutex_lock(&ctrl
->mbox_lock
);
1530 wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1531 memset(wrb
, 0, sizeof(*wrb
));
1532 ioctl
= embedded_payload(wrb
);
1534 be_wrb_hdr_prepare(wrb
, sizeof(*ioctl
), true, 0);
1535 be_cmd_hdr_prepare(&ioctl
->h
.req_hdr
, CMD_SUBSYSTEM_COMMON
,
1536 OPCODE_COMMON_SET_FEATURES
,
1537 EMBED_MBX_MAX_PAYLOAD_SIZE
);
1538 ioctl
->feature
= BE_CMD_SET_FEATURE_UER
;
1539 ioctl
->param_len
= sizeof(ioctl
->param
.req
);
1540 ioctl
->param
.req
.uer
= BE_CMD_UER_SUPP_BIT
;
1541 ret
= be_mbox_notify(ctrl
);
1543 phba
->ue2rp
= ioctl
->param
.resp
.ue2rp
;
1544 set_bit(BEISCSI_HBA_UER_SUPP
, &phba
->state
);
1545 beiscsi_log(phba
, KERN_INFO
, BEISCSI_LOG_INIT
,
1546 "BG_%d : HBA error recovery supported\n");
1549 * Check "MCC_STATUS_INVALID_LENGTH" for SKH.
1550 * Older FW versions return this error.
1552 if (ret
== MCC_STATUS_ILLEGAL_REQUEST
||
1553 ret
== MCC_STATUS_INVALID_LENGTH
)
1554 __beiscsi_log(phba
, KERN_INFO
,
1555 "BG_%d : HBA error recovery not supported\n");
1558 mutex_unlock(&ctrl
->mbox_lock
);
1562 static u32
beiscsi_get_post_stage(struct beiscsi_hba
*phba
)
1566 if (is_chip_be2_be3r(phba
))
1567 sem
= ioread32(phba
->csr_va
+ SLIPORT_SEMAPHORE_OFFSET_BEx
);
1569 pci_read_config_dword(phba
->pcidev
,
1570 SLIPORT_SEMAPHORE_OFFSET_SH
, &sem
);
1574 int beiscsi_check_fw_rdy(struct beiscsi_hba
*phba
)
1576 u32 loop
, post
, rdy
= 0;
1580 post
= beiscsi_get_post_stage(phba
);
1581 if (post
& POST_ERROR_BIT
)
1583 if ((post
& POST_STAGE_MASK
) == POST_STAGE_ARMFW_RDY
) {
1591 __beiscsi_log(phba
, KERN_ERR
,
1592 "BC_%d : FW not ready 0x%x\n", post
);
1598 int beiscsi_cmd_function_reset(struct beiscsi_hba
*phba
)
1600 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
1601 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1602 struct be_post_sgl_pages_req
*req
= embedded_payload(wrb
);
1605 mutex_lock(&ctrl
->mbox_lock
);
1607 req
= embedded_payload(wrb
);
1608 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
1609 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_COMMON
,
1610 OPCODE_COMMON_FUNCTION_RESET
, sizeof(*req
));
1611 status
= be_mbox_notify(ctrl
);
1613 mutex_unlock(&ctrl
->mbox_lock
);
1617 int beiscsi_cmd_special_wrb(struct be_ctrl_info
*ctrl
, u32 load
)
1619 struct be_mcc_wrb
*wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1620 struct beiscsi_hba
*phba
= pci_get_drvdata(ctrl
->pdev
);
1624 mutex_lock(&ctrl
->mbox_lock
);
1625 memset(wrb
, 0, sizeof(*wrb
));
1627 endian_check
= (u8
*) wrb
;
1629 /* to start communicating */
1630 *endian_check
++ = 0xFF;
1631 *endian_check
++ = 0x12;
1632 *endian_check
++ = 0x34;
1633 *endian_check
++ = 0xFF;
1634 *endian_check
++ = 0xFF;
1635 *endian_check
++ = 0x56;
1636 *endian_check
++ = 0x78;
1637 *endian_check
++ = 0xFF;
1639 /* to stop communicating */
1640 *endian_check
++ = 0xFF;
1641 *endian_check
++ = 0xAA;
1642 *endian_check
++ = 0xBB;
1643 *endian_check
++ = 0xFF;
1644 *endian_check
++ = 0xFF;
1645 *endian_check
++ = 0xCC;
1646 *endian_check
++ = 0xDD;
1647 *endian_check
= 0xFF;
1649 be_dws_cpu_to_le(wrb
, sizeof(*wrb
));
1651 status
= be_mbox_notify(ctrl
);
1653 beiscsi_log(phba
, KERN_INFO
, BEISCSI_LOG_INIT
,
1654 "BC_%d : special WRB message failed\n");
1655 mutex_unlock(&ctrl
->mbox_lock
);
1659 int beiscsi_init_sliport(struct beiscsi_hba
*phba
)
1663 /* check POST stage before talking to FW */
1664 status
= beiscsi_check_fw_rdy(phba
);
1668 /* clear all error states after checking FW rdy */
1669 phba
->state
&= ~BEISCSI_HBA_IN_ERR
;
1671 /* check again UER support */
1672 phba
->state
&= ~BEISCSI_HBA_UER_SUPP
;
1675 * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
1676 * It should clean up any stale info in FW for this fn.
1678 status
= beiscsi_cmd_function_reset(phba
);
1680 beiscsi_log(phba
, KERN_ERR
, BEISCSI_LOG_INIT
,
1681 "BC_%d : SLI Function Reset failed\n");
1685 /* indicate driver is loading */
1686 return beiscsi_cmd_special_wrb(&phba
->ctrl
, 1);
1690 * beiscsi_cmd_iscsi_cleanup()- Inform FW to cleanup EP data structures.
1691 * @phba: pointer to dev priv structure
1696 * Failure: Non-Zero Value
1698 int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba
*phba
, unsigned short ulp
)
1700 struct be_ctrl_info
*ctrl
= &phba
->ctrl
;
1701 struct iscsi_cleanup_req_v1
*req_v1
;
1702 struct iscsi_cleanup_req
*req
;
1703 struct be_mcc_wrb
*wrb
;
1706 mutex_lock(&ctrl
->mbox_lock
);
1707 wrb
= wrb_from_mbox(&ctrl
->mbox_mem
);
1708 req
= embedded_payload(wrb
);
1709 be_wrb_hdr_prepare(wrb
, sizeof(*req
), true, 0);
1710 be_cmd_hdr_prepare(&req
->hdr
, CMD_SUBSYSTEM_ISCSI
,
1711 OPCODE_COMMON_ISCSI_CLEANUP
, sizeof(*req
));
1714 * TODO: Check with FW folks the chute value to be set.
1715 * For now, use the ULP_MASK as the chute value.
1717 if (is_chip_be2_be3r(phba
)) {
1718 req
->chute
= (1 << ulp
);
1719 req
->hdr_ring_id
= HWI_GET_DEF_HDRQ_ID(phba
, ulp
);
1720 req
->data_ring_id
= HWI_GET_DEF_BUFQ_ID(phba
, ulp
);
1722 req_v1
= (struct iscsi_cleanup_req_v1
*)req
;
1723 req_v1
->hdr
.version
= 1;
1724 req_v1
->hdr_ring_id
= cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba
,
1726 req_v1
->data_ring_id
= cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba
,
1730 status
= be_mbox_notify(ctrl
);
1732 beiscsi_log(phba
, KERN_WARNING
, BEISCSI_LOG_INIT
,
1733 "BG_%d : %s failed %d\n", __func__
, ulp
);
1734 mutex_unlock(&ctrl
->mbox_lock
);
1739 * beiscsi_detect_ue()- Detect Unrecoverable Error on adapter
1740 * @phba: Driver priv structure
1742 * Read registers linked to UE and check for the UE status
1744 int beiscsi_detect_ue(struct beiscsi_hba
*phba
)
1746 uint32_t ue_mask_hi
= 0, ue_mask_lo
= 0;
1747 uint32_t ue_hi
= 0, ue_lo
= 0;
1751 pci_read_config_dword(phba
->pcidev
,
1752 PCICFG_UE_STATUS_LOW
, &ue_lo
);
1753 pci_read_config_dword(phba
->pcidev
,
1754 PCICFG_UE_STATUS_MASK_LOW
,
1756 pci_read_config_dword(phba
->pcidev
,
1757 PCICFG_UE_STATUS_HIGH
,
1759 pci_read_config_dword(phba
->pcidev
,
1760 PCICFG_UE_STATUS_MASK_HI
,
1763 ue_lo
= (ue_lo
& ~ue_mask_lo
);
1764 ue_hi
= (ue_hi
& ~ue_mask_hi
);
1767 if (ue_lo
|| ue_hi
) {
1768 set_bit(BEISCSI_HBA_IN_UE
, &phba
->state
);
1769 __beiscsi_log(phba
, KERN_ERR
,
1770 "BC_%d : HBA error detected\n");
1775 for (i
= 0; ue_lo
; ue_lo
>>= 1, i
++) {
1777 __beiscsi_log(phba
, KERN_ERR
,
1778 "BC_%d : UE_LOW %s bit set\n",
1779 desc_ue_status_low
[i
]);
1784 for (i
= 0; ue_hi
; ue_hi
>>= 1, i
++) {
1786 __beiscsi_log(phba
, KERN_ERR
,
1787 "BC_%d : UE_HIGH %s bit set\n",
1788 desc_ue_status_hi
[i
]);
1795 * beiscsi_detect_tpe()- Detect Transient Parity Error on adapter
1796 * @phba: Driver priv structure
1798 * Read SLIPORT SEMAPHORE register to check for UER
1801 int beiscsi_detect_tpe(struct beiscsi_hba
*phba
)
1806 post
= beiscsi_get_post_stage(phba
);
1807 status
= post
& POST_STAGE_MASK
;
1808 if ((status
& POST_ERR_RECOVERY_CODE_MASK
) ==
1809 POST_STAGE_RECOVERABLE_ERR
) {
1810 set_bit(BEISCSI_HBA_IN_TPE
, &phba
->state
);
1811 __beiscsi_log(phba
, KERN_INFO
,
1812 "BC_%d : HBA error recoverable: 0x%x\n", post
);
1815 __beiscsi_log(phba
, KERN_INFO
,
1816 "BC_%d : HBA in UE: 0x%x\n", post
);