/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	struct bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
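
/*
 * Note: ELX_LOOPBACK_HEADER_SZ above is the classic offsetof() idiom
 * written out by hand; it is equivalent to
 *
 *	offsetof(struct lpfc_sli_ct_request, un)
 *
 * i.e. the number of CT header bytes that precede the payload union.
 */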

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			list_del(&mlast->list);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}

static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffer and place in BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer  */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}
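
/*
 * Illustrative pairing of the helpers above (a sketch, not driver code;
 * it assumes a previously mapped BPL page bmp, as used elsewhere in this
 * file): allocate outbound DMA buffers onto a BPL, copy the bsg request
 * payload into them, and release them with lpfc_free_bsg_buffers() on
 * the error path.
 *
 *	struct ulp_bde64 *bpl = (struct ulp_bde64 *)bmp->virt;
 *	int entries = LPFC_BPL_SIZE / sizeof(struct ulp_bde64);
 *	struct lpfc_dmabuf *cmp;
 *
 *	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
 *				     1, bpl, &entries);
 *	if (!cmp)
 *		return -ENOMEM;
 *	lpfc_bsg_copy_data(cmp, &job->request_payload,
 *			   job->request_payload.payload_len, 1);
 *	...
 *	lpfc_free_bsg_buffers(phba, cmp);
 */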

static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{
	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}
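
/*
 * Direction convention for lpfc_bsg_copy_data() (a summary of the code
 * above): to_buffers != 0 copies from the bsg scatter/gather list into
 * the DMA buffer chain (outbound request data); to_buffers == 0 copies
 * from the DMA buffers back into the sg list (inbound reply data). The
 * sg list is walked with an SG_MITER_ATOMIC mapping iterator, which is
 * why interrupts are disabled around the copy loop.
 */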

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			   struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	IOCB_t *cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	cmd = &cmdiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context_un.ndlp = ndlp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (iocb_stat == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed yet */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (iocb_stat == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
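
/*
 * Layout note (summarizing the iocb setup above): the GEN_REQUEST64
 * command points at a single mbuf page (bmp) used as a buffer pointer
 * list. The first request_nseg entries describe the outbound CT command
 * buffers and the next reply_nseg entries describe the inbound response
 * buffers, so one BPL page carries both directions:
 *
 *	bmp->virt: [ BDE_64 x request_nseg | BDE_64I x reply_nseg ]
 *	bdeSize = (request_nseg + reply_nseg) * sizeof(struct ulp_bde64)
 */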

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	rsp = &rspiocbq->iocb;
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else {
			rc = -EIO;
		}
	}

	lpfc_nlp_put(ndlp);
	lpfc_els_free_iocb(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* We will use the allocated dma buffers by prep els iocb for command
	 * and response to ensure if the job times out and the request is
	 * freed, we won't DMA into memory that is no longer allocated to
	 * the request.
	 */

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	rpi = ndlp->nlp_rpi;

	/* Transfer the request payload to allocated command dma buffer */

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

linkdown_err:
	cmdiocbq->context1 = ndlp;
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}
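
/*
 * Lifetime sketch (illustrative, inferred from the call sites in this
 * file): each waiter or queued delivery takes a reference with
 * lpfc_bsg_event_ref() and drops it with lpfc_bsg_event_unref(); the
 * final kref_put() lands in lpfc_bsg_event_free(), which unlinks the
 * event and releases any queued event_data. A typical round trip:
 *
 *	evt = lpfc_bsg_event_new(mask, reg_id, req_id);
 *	lpfc_bsg_event_ref(evt);	waiter now co-owns evt
 *	...deliver the event or time out...
 *	lpfc_bsg_event_unref(evt);	drop the waiter's reference
 *	lpfc_bsg_event_unref(evt);	drop the last one, frees evt
 */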

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver internal I/O ring.
 * @piocbq: Pointer to the receiving iocb.
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct bsg_job *job = NULL;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
		    evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
						flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
							(phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				piocbq->iocb.unsli3.rcvsli3.ox_id;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			bsg_reply = job->reply;
			bsg_reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			bsg_reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			bsg_job_done(job, bsg_reply->result,
				     bsg_reply->reply_payload_rcv_len);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}

/**
 * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an abort of a CT command toward the management
 * plane.
 *
 * If a pending context for a CT command to the management plane is present,
 * it clears that context and returns 1 (handled); otherwise it returns 0,
 * indicating that no such context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
		   FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}
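
/*
 * Flow sketch (a summary, not new behavior): SET_EVENT registers a
 * waiter and parks the bsg job (the function returns 0 without calling
 * bsg_job_done()); the job is completed later from
 * lpfc_bsg_ct_unsol_event() when a matching unsolicited CT frame
 * arrives. The application then issues GET_EVENT, handled below, to
 * collect the queued event_data payload.
 */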

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt, *evt_next;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't anymore
	 */
	if (evt_dat == NULL) {
		bsg_reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		bsg_reply->reply_payload_rcv_len = 0;

	kfree(evt_dat->data);
	kfree(evt_dat);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;

job_error:
	job->dd_data = NULL;
	bsg_reply->result = rc;
	return rc;
}

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_issue_ct_rsp_cmp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed job data or set the error status */

	if (job) {
		bsg_reply = job->reply;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @cmp: Pointer to a cmp dma buffer descriptor.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
					"2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
			phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag loopback
 * mode on the device.
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		pring = &psli->sli3_ring[LPFC_FCP_RING];
		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
		return 0;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring || (pring->ringno != LPFC_FCP_RING))
			continue;
		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
				      &pring->ring_lock))
			break;
	}
	return 0;
}

/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for driver exit processing of setting up
 * diag loopback mode on device.
 */
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete then the link is brought down. If the link
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}
				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
 * @phba: Pointer to HBA context object.
 * @diag: Flag for setting link to diag or normal operation state.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * link to either diag state or normal operation state.
 */
static int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_mbx_set_link_diag_state *link_diag_state;
	uint32_t req_len, alloc_len;
	int mbxstatus = MBX_SUCCESS, rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		rc = -ENOMEM;
		goto link_diag_state_set_out;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
			diag, phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	if (diag)
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 1);
	else
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 0);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
		rc = 0;
	else
		rc = -ENODEV;

link_diag_state_set_out:
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc;
}

/**
 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * up internal loopback diagnostic.
 */
static int
lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
	int mbxstatus = MBX_SUCCESS, rc = 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
				req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
	bf_set(lpfc_mbx_set_diag_state_link_num,
	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type,
	       &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
	       LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3127 Failed setup loopback mode mailbox "
				"command, rc:x%x, status:x%x\n", mbxstatus,
				pmboxq->u.mb.mbxStatus);
		rc = -ENODEV;
	}
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
 * @phba: Pointer to HBA context object.
 *
 * This function sets up SLI4 FC port registrations for a diagnostic run,
 * which includes all the rpis, the vfi, and also the vpi.
 */
static int
lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
{
	int rc;

	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3136 Port still had vfi registered: "
				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
				phba->pport->fc_myDID, phba->fcf.fcfi,
				phba->sli4_hba.vfi_ids[phba->pport->vfi],
				phba->vpi_ids[phba->pport->vpi]);
		return -EINVAL;
	}
	rc = lpfc_issue_reg_vfi(phba->pport);
	return rc;
}
2043 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
/**
 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli4 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 **/
static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags, timeout;
	int i, rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3011 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* indicate we are in loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag |= LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);

	/* reset port to start from scratch */
	rc = lpfc_selective_reset(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3129 Bring link to diagnostic state.\n");
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3130 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			rc = -ETIMEDOUT;
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3131 Timeout waiting for link to "
					"diagnostic mode, timeout:%d ms\n",
					timeout * 10);
			goto loopback_mode_exit;
		}
		msleep(10);
	}

	/* set up loopback mode */
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3132 Set up loopback mode:x%x\n", link_flags);

	if (link_flags == INTERNAL_LOOP_BACK)
		rc = lpfc_sli4_bsg_set_internal_loopback(phba);
	else if (link_flags == EXTERNAL_LOOP_BACK)
		rc = lpfc_hba_init_link_fc_topology(phba,
						    FLAGS_TOPOLOGY_MODE_PT_PT,
						    MBX_NOWAIT);
	else {
		rc = -EINVAL;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3141 Loopback mode:x%x not supported\n",
				link_flags);
		goto loopback_mode_exit;
	}

	if (!rc) {
		/* wait for the link attention interrupt */
		msleep(100);
		i = 0;
		while (phba->link_state < LPFC_LINK_UP) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3137 Timeout waiting for link up "
					"in loopback mode, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

	/* port resource registration setup for loopback diagnostic */
	if (!rc) {
		/* set up a non-zero myDID for loopback test */
		phba->pport->fc_myDID = 1;
		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
	} else
		goto loopback_mode_exit;

	if (!rc) {
		/* wait for the port ready */
		msleep(100);
		i = 0;
		while (phba->link_state != LPFC_HBA_READY) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3133 Timeout waiting for port "
					"loopback mode ready, timeout:%d ms\n",
					timeout * 10);
				break;
			}
			msleep(10);
		}
	}

loopback_mode_exit:
	/* clear loopback diagnostic mode */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_LOOPBACK_MODE;
		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_bsg_diag_mode_exit(phba);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
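
/*
 * Illustrative sketch (editor's addition, not driver code): user space
 * reaches the handler above through the FC bsg node with a vendor
 * request whose payload is a struct diag_mode_set.  Only the structure
 * fields and the timeout convention come from the code above; the
 * delivery path is assumed for illustration.
 *
 *	struct diag_mode_set dms;
 *	memset(&dms, 0, sizeof(dms));
 *	dms.type = INTERNAL_LOOP_BACK;    (or EXTERNAL_LOOP_BACK)
 *	dms.timeout = 60;                 (handler polls timeout * 100
 *	                                   times at ~10 ms per poll, so
 *	                                   this is roughly in seconds)
 *	... embed &dms as rqst_data.h_vendor.vendor_cmd of the
 *	    fc_bsg_request sent to the host's bsg device ...
 */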
/**
 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function checks the bsg diag command from the user and dispatches
 * it to the proper driver action routine.
 **/
static int
lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
{
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	int rc;

	shost = fc_bsg_to_shost(job);
	if (!shost)
		return -ENODEV;
	vport = shost_priv(shost);
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
		 LPFC_SLI_INTF_IF_TYPE_2)
		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
	else
		rc = -ENODEV;

	return rc;
}
/**
 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
 *
 * This function checks the bsg diag mode end command from the user and
 * dispatches it to the proper driver action routine.
 **/
static int
lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	struct diag_mode_set *loopback_mode_end_cmd;
	uint32_t timeout;
	int rc, i;

	shost = fc_bsg_to_shost(job);
	if (!shost)
		return -ENODEV;
	vport = shost_priv(shost);
	if (!vport)
		return -ENODEV;
	phba = vport->phba;
	if (!phba)
		return -ENODEV;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return -ENODEV;
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -ENODEV;

	/* clear loopback diagnostic mode */
	spin_lock_irq(&phba->hbalock);
	phba->link_flag &= ~LS_LOOPBACK_MODE;
	spin_unlock_irq(&phba->hbalock);
	loopback_mode_end_cmd = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	timeout = loopback_mode_end_cmd->timeout * 100;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3139 Failed to bring link to diagnostic "
				"state, rc:x%x\n", rc);
		goto loopback_mode_end_exit;
	}

	/* wait for link down before proceeding */
	i = 0;
	while (phba->link_state != LPFC_LINK_DOWN) {
		if (i++ > timeout) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3140 Timeout waiting for link to "
					"diagnostic mode_end, timeout:%d ms\n",
					timeout * 10);
			/* there is nothing much we can do here */
			break;
		}
		msleep(10);
	}

	/* reset port resource registrations */
	rc = lpfc_selective_reset(phba);
	phba->pport->fc_myDID = 0;

loopback_mode_end_exit:
	/* make return code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
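
/*
 * Usage note (editor's addition): lpfc_sli4_bsg_diag_mode_end() is the
 * inverse of lpfc_sli4_bsg_diag_loopback_mode() above.  The sequencing
 * implied by the two handlers, from a management application's point of
 * view, is approximately:
 *
 *	LPFC_BSG_VENDOR_DIAG_MODE      port enters loopback mode
 *	LPFC_BSG_VENDOR_DIAG_TEST      one or more loopback runs
 *	LPFC_BSG_VENDOR_DIAG_MODE_END  diag state cleared, selective
 *	                               reset issued, fc_myDID back to 0
 *
 * This ordering is inferred from the handlers, not enforced by them.
 */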
/**
 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
 *
 * This function performs an SLI4 diag link test request from the user
 * application.
 **/
static int
lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct Scsi_Host *shost;
	struct lpfc_vport *vport;
	struct lpfc_hba *phba;
	LPFC_MBOXQ_t *pmboxq;
	struct sli4_link_diag *link_diag_test_cmd;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct diag_status *diag_status_reply;
	int mbxstatus, rc = 0;

	shost = fc_bsg_to_shost(job);
	if (!shost) {
		rc = -ENODEV;
		goto job_error;
	}
	vport = shost_priv(shost);
	if (!vport) {
		rc = -ENODEV;
		goto job_error;
	}
	phba = vport->phba;
	if (!phba) {
		rc = -ENODEV;
		goto job_error;
	}

	if (phba->sli_rev < LPFC_SLI_REV4) {
		rc = -ENODEV;
		goto job_error;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = -ENODEV;
		goto job_error;
	}

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct sli4_link_diag)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3013 Received LINK DIAG TEST request "
				" size:%d below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct sli4_link_diag)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	link_diag_test_cmd = (struct sli4_link_diag *)
			 bsg_request->rqst_data.h_vendor.vendor_cmd;

	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);

	if (rc)
		goto job_error;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		goto link_diag_test_exit;

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len)
		goto link_diag_test_exit;

	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
	       link_diag_test_cmd->test_id);
	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
	       link_diag_test_cmd->loops);
	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
	       link_diag_test_cmd->test_version);
	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
	       link_diag_test_cmd->error_action);

	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || mbxstatus) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3010 Run link diag test mailbox failed with "
				"mbx_status x%x status x%x, add_status x%x\n",
				mbxstatus, shdr_status, shdr_add_status);
	}

	diag_status_reply = (struct diag_status *)
			    bsg_reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3012 Received Run link diag test reply "
				"below minimum size (%d): reply_len:%d\n",
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_status)),
				job->reply_len);
		rc = -EINVAL;
		goto job_error;
	}

	diag_status_reply->mbox_status = mbxstatus;
	diag_status_reply->shdr_status = shdr_status;
	diag_status_reply->shdr_add_status = shdr_add_status;

link_diag_test_exit:
	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);

	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);

	lpfc_bsg_diag_mode_exit(phba);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
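
/*
 * Illustrative sketch (editor's addition): judging from the bf_set()
 * calls above, the struct sli4_link_diag payload carried by a
 * LPFC_BSG_VENDOR_DIAG_LINK_TEST request is filled along these lines
 * (values are placeholders, not recommendations):
 *
 *	struct sli4_link_diag ld;
 *	memset(&ld, 0, sizeof(ld));
 *	ld.test_id = ...;	firmware-defined test identifier
 *	ld.loops = 1;
 *	ld.test_version = 0;
 *	ld.error_action = 0;
 *
 * The link number and link type are not taken from the request; the
 * handler reads them from phba->sli4_hba.lnk_info.
 */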
/**
 * lpfcdiag_loop_self_reg - obtains a remote port login id
 * @phba: Pointer to HBA context object
 * @rpi: Pointer to a remote port login id
 *
 * This function obtains a remote port login id so the diag loopback test
 * can send and receive its own unsolicited CT command.
 **/
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *dmabuff;
	int status;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
				(uint8_t *)&phba->pport->fc_sparam,
				mbox, *rpi);
	else {
		*rpi = lpfc_sli4_alloc_rpi(phba);
		if (*rpi == LPFC_RPI_ALLOC_ERROR) {
			mempool_free(mbox, phba->mbox_mem_pool);
			return -EBUSY;
		}
		status = lpfc_reg_rpi(phba, phba->pport->vpi,
				phba->pport->fc_myDID,
				(uint8_t *)&phba->pport->fc_sparam,
				mbox, *rpi);
	}

	if (status) {
		mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENOMEM;
	}

	dmabuff = (struct lpfc_dmabuf *)mbox->context1;
	mbox->context1 = NULL;
	mbox->context2 = NULL;
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
		kfree(dmabuff);
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_sli4_free_rpi(phba, *rpi);
		return -ENODEV;
	}

	if (phba->sli_rev < LPFC_SLI_REV4)
		*rpi = mbox->u.mb.un.varWords[0];

	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	kfree(dmabuff);
	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}
/**
 * lpfcdiag_loop_self_unreg - unregs from the rpi
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 *
 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
 **/
static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
{
	LPFC_MBOXQ_t *mbox;
	int status;

	/* Allocate mboxq structure */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox == NULL)
		return -ENOMEM;

	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_unreg_login(phba, 0, rpi, mbox);
	else
		lpfc_unreg_login(phba, phba->pport->vpi,
				 phba->sli4_hba.rpi_ids[rpi], mbox);

	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}
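
/*
 * Usage note (editor's addition): lpfcdiag_loop_self_reg() and
 * lpfcdiag_loop_self_unreg() bracket a loopback run, as in
 * lpfc_bsg_diag_loopback_run() below:
 *
 *	uint16_t rpi = 0;
 *	rc = lpfcdiag_loop_self_reg(phba, &rpi);
 *	if (rc)
 *		return rc;
 *	... issue the CT command to ourselves using rpi ...
 *	lpfcdiag_loop_self_unreg(phba, rpi);
 *
 * On SLI4 the rpi itself is allocated and freed by this pair, so a
 * missed unreg leaks an rpi, not just a login.
 */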
/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * flags are used so the unsolicited response handler is able to process
 * the ct command sent on the same port.
 **/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
			 uint16_t *txxri, uint16_t *rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	int time_left;
	int iocb_stat = IOCB_SUCCESS;
	unsigned long flags;

	*txxri = 0;
	*rxxri = 0;
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return -ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		if (dmabuf->virt) {
			INIT_LIST_HEAD(&dmabuf->list);
			bpl = (struct ulp_bde64 *) dmabuf->virt;
			memset(bpl, 0, sizeof(*bpl));
			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
			bpl->addrHigh =
				le32_to_cpu(putPaddrHigh(dmabuf->phys +
					sizeof(*bpl)));
			bpl->addrLow =
				le32_to_cpu(putPaddrLow(dmabuf->phys +
					sizeof(*bpl)));
			bpl->tus.f.bdeFlags = 0;
			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
		}
	}

	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
		dmabuf->virt == NULL) {
		ret_val = -ENOMEM;
		goto err_get_xri_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);

	cmd->un.xseq64.w5.hcsw.Fctl = LA;
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = rpi;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->iocb_cmpl = NULL;

	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
				rspiocbq,
				(phba->fc_ratov * 2)
				+ LPFC_DRVR_TIMEOUT);
	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
		ret_val = -EIO;
		goto err_get_xri_exit;
	}
	*txxri = rsp->ulpContext;

	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	if (list_empty(&evt->events_to_see))
		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}
/**
 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
 * @phba: Pointer to HBA context object
 *
 * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
 * returns the pointer to the buffer.
 **/
static struct lpfc_dmabuf *
lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf;
	struct pci_dev *pcidev = phba->pcidev;

	/* allocate dma buffer struct */
	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	INIT_LIST_HEAD(&dmabuf->list);

	/* now, allocate dma buffer */
	dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
					   &(dmabuf->phys), GFP_KERNEL);

	if (!dmabuf->virt) {
		kfree(dmabuf);
		return NULL;
	}

	return dmabuf;
}
/**
 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
 *
 * This routine simply frees a dma buffer and its associated buffer
 * descriptor referred by @dmabuf.
 **/
static void
lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
{
	struct pci_dev *pcidev = phba->pcidev;

	if (!dmabuf)
		return;

	if (dmabuf->virt)
		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
				  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return;
}
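
/*
 * Usage note (editor's addition): buffers from lpfc_bsg_dma_page_alloc()
 * are BSG_MBOX_SIZE bytes of zeroed DMA-coherent memory and must be
 * released with lpfc_bsg_dma_page_free() (or, when chained on a list,
 * with lpfc_bsg_dma_page_list_free() below):
 *
 *	struct lpfc_dmabuf *buf = lpfc_bsg_dma_page_alloc(phba);
 *	if (!buf)
 *		return -ENOMEM;
 *	... use buf->virt / buf->phys for up to BSG_MBOX_SIZE bytes ...
 *	lpfc_bsg_dma_page_free(phba, buf);
 */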
/**
 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
 * @phba: Pointer to HBA context object.
 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
 *
 * This routine simply frees all dma buffers and their associated buffer
 * descriptors referred by @dmabuf_list.
 **/
static void
lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
			    struct list_head *dmabuf_list)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;

	if (list_empty(dmabuf_list))
		return;

	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
		list_del_init(&dmabuf->list);
		lpfc_bsg_dma_page_free(phba, dmabuf);
	}
	return;
}
/**
 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
 * @phba: Pointer to HBA context object
 * @bpl: Pointer to 64 bit bde structure
 * @size: Number of bytes to process
 * @nocopydata: Flag indicating whether user data is copied into the buffers
 *
 * This function allocates page size buffers and populates an lpfc_dmabufext.
 * If allowed the user data pointed to with indataptr is copied into the kernel
 * memory. The chained list of page size buffers is returned.
 **/
static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba *phba,
		   struct ulp_bde64 *bpl, uint32_t size,
		   int nocopydata)
{
	struct lpfc_dmabufext *mlist = NULL;
	struct lpfc_dmabufext *dmp;
	int cnt, offset = 0, i = 0;
	struct pci_dev *pcidev;

	pcidev = phba->pcidev;

	while (size) {
		/* We get chunks of 4K */
		if (size > BUF_SZ_4K)
			cnt = BUF_SZ_4K;
		else
			cnt = size;

		/* allocate struct lpfc_dmabufext buffer header */
		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
		if (!dmp)
			goto out;

		INIT_LIST_HEAD(&dmp->dma.list);

		/* Queue it to a linked list */
		if (mlist)
			list_add_tail(&dmp->dma.list, &mlist->dma.list);
		else
			mlist = dmp;

		/* allocate buffer */
		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
						   cnt,
						   &(dmp->dma.phys),
						   GFP_KERNEL);

		if (!dmp->dma.virt)
			goto out;

		dmp->size = cnt;

		if (nocopydata) {
			bpl->tus.f.bdeFlags = 0;
			pci_dma_sync_single_for_device(phba->pcidev,
				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);

		} else {
			memset((uint8_t *)dmp->dma.virt, 0, cnt);
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		}

		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
		bpl->tus.f.bdeSize = (ushort) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;

		i++;
		offset += cnt;
		size -= cnt;
	}

	if (mlist) {
		mlist->flag = i;
		return mlist;
	}
out:
	diag_cmd_data_free(phba, mlist);
	return NULL;
}
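
/*
 * Usage note (editor's addition): diag_cmd_data_alloc() writes one
 * ulp_bde64 entry per chunk of at most BUF_SZ_4K bytes and records the
 * number of entries in mlist->flag, so callers recover the BDE count
 * from the returned head:
 *
 *	rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
 *	if (!rxbuffer)
 *		goto fail;
 *	num_bde = (uint32_t)rxbuffer->flag;
 *
 * The @bpl array must therefore provide room for roughly
 * len / BUF_SZ_4K (rounded up) entries; the callers in this file carve
 * it out of a single mbuf page.
 */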
/**
 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to receive
 * an unsolicited CT command.
 **/
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
			     size_t len)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd = NULL;
	struct list_head head, *curr, *next;
	struct lpfc_dmabuf *rxbmp;
	struct lpfc_dmabuf *dmp;
	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
	struct ulp_bde64 *rxbpl = NULL;
	uint32_t num_bde;
	struct lpfc_dmabufext *rxbuffer = NULL;
	int ret_val = 0;
	int iocb_stat;
	int i = 0;

	pring = lpfc_phba_elsring(phba);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (rxbmp != NULL) {
		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		if (rxbmp->virt) {
			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
		}
	}

	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
		ret_val = -ENOMEM;
		goto err_post_rxbufs_exit;
	}

	/* Queue buffers for the receive exchange */
	num_bde = (uint32_t)rxbuffer->flag;
	dmp = &rxbuffer->dma;
	cmd = &cmdiocbq->iocb;
	i = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &dmp->list);
	list_for_each_safe(curr, next, &head) {
		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
		list_del(curr);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
			cmd->un.quexri64cx.buff.bde.addrHigh =
				putPaddrHigh(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.addrLow =
				putPaddrLow(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
			cmd->ulpCommand = CMD_QUE_XRI64_CX;
			cmd->ulpPU = 0;
			cmd->ulpLe = 1;
			cmd->ulpBdeCount = 1;
			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;

		} else {
			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
			cmd->un.cont64[i].tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->ulpBdeCount = ++i;

			if ((--num_bde > 0) && (i < 2))
				continue;

			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
			cmd->ulpLe = 1;
		}

		cmd->ulpClass = CLASS3;
		cmd->ulpContext = rxxri;

		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
						0);
		if (iocb_stat == IOCB_ERROR) {
			diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[0]);
			if (mp[1])
				diag_cmd_data_free(phba,
					  (struct lpfc_dmabufext *)mp[1]);
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
		if (mp[1]) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
			mp[1] = NULL;
		}

		/* The iocb was freed by lpfc_sli_issue_iocb */
		cmdiocbq = lpfc_sli_get_iocbq(phba);
		if (!cmdiocbq) {
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = -EIO;
			goto err_post_rxbufs_exit;
		}

		cmd = &cmdiocbq->iocb;
		i = 0;
	}
	list_del(&head);

err_post_rxbufs_exit:

	if (rxbmp) {
		if (rxbmp->virt)
			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}

	if (cmdiocbq)
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	return ret_val;
}
/**
 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
 *
 * This function receives a user data buffer to be transmitted and received on
 * the same port, the link must be up and in loopback mode prior
 * to being called.
 * 1. A kernel buffer is allocated to copy the user data into.
 * 2. The port registers with "itself".
 * 3. The transmit and receive exchange ids are obtained.
 * 4. The receive exchange id is posted.
 * 5. A new els loopback event is created.
 * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
 *
 * This function is meant to be called n times while the port is in loopback
 * so it is the apps responsibility to issue a reset to take the port out
 * of loopback mode.
 **/
static int
lpfc_bsg_diag_loopback_run(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;
	uint32_t full_size;
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi = 0;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
	IOCB_t *cmd, *rsp = NULL;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf *curr;
	uint16_t txxri = 0, rxxri;
	uint32_t num_bde;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	int time_left;
	int iocb_stat = IOCB_SUCCESS;
	unsigned long flags;
	void *dataout = NULL;
	uint32_t total_mem;

	/* in case no data is returned return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if (job->request_payload.payload_len !=
		job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ;	/* plus the header */

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}

	if (full_size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
		 * then we allocate 64k and re-use that buffer over and over to
		 * xfer the whole block. This is because Linux kernel has a
		 * problem allocating more than 120k of kernel space memory. Saw
		 * problem with GET_FCPTARGETMAPPING...
		 */
		if (size <= (64 * 1024))
			total_mem = full_size;
		else
			total_mem = 64 * 1024;
	} else
		/* Allocate memory for ioctl data */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
				job->request_payload.sg_cnt,
				ptr, size);
	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		goto loopback_test_exit;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}

		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
		if (rc) {
			lpfcdiag_loop_self_unreg(phba, rpi);
			goto loopback_test_exit;
		}
	}
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (phba->sli_rev < LPFC_SLI_REV4)
		rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		if (txbmp->virt) {
			INIT_LIST_HEAD(&txbmp->list);
			txbpl = (struct ulp_bde64 *) txbmp->virt;
			txbuffer = diag_cmd_data_alloc(phba,
							txbpl, full_size, 0);
		}
	}

	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}
	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}

	cmd = &cmdiocbq->iocb;
	if (phba->sli_rev < LPFC_SLI_REV4)
		rsp = &rspiocbq->iocb;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
			ptr + current_offset,
			segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);

	/* Build the XMIT_SEQUENCE iocb */
	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;

	if (phba->sli_rev < LPFC_SLI_REV4) {
		cmd->ulpContext = txxri;
	} else {
		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
		cmdiocbq->context3 = txbmp;
		cmdiocbq->sli4_xritag = NO_XRI;
		cmd->unsli3.rcvsli3.ox_id = 0xffff;
	}
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->iocb_cmpl = NULL;
	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq, (phba->fc_ratov * 2) +
					     LPFC_DRVR_TIMEOUT);

	if ((iocb_stat != IOCB_SUCCESS) ||
	    ((phba->sli_rev < LPFC_SLI_REV4) &&
	     (rsp->ulpStatus != IOSTAT_SUCCESS))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3126 Failed loopback test issue iocb: "
				"iocb_stat:x%x\n", iocb_stat);
		rc = -EIO;
		goto err_loopback_test_exit;
	}

	evt->waiting = 1;
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		msecs_to_jiffies(1000 *
			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
	evt->waiting = 0;
	if (list_empty(&evt->events_to_see)) {
		rc = (time_left) ? -EINTR : -ETIMEDOUT;
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"3125 Not receiving unsolicited event, "
				"rc:x%x\n", rc);
	} else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			rc = IOCB_SUCCESS;
			/* skip over elx loopback header */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			bsg_reply->reply_payload_rcv_len = size;
		}
	}

err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == IOCB_SUCCESS)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
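
/*
 * Flow summary (editor's addition, a reading aid distilled from the
 * function above, not a normative interface description):
 *
 *	lpfcdiag_loop_self_reg()       register with ourselves, get rpi
 *	lpfcdiag_loop_get_xri()        (SLI3 only) get tx/rx exchange ids
 *	lpfcdiag_loop_post_rxbufs()    (SLI3 only) post receive buffers
 *	lpfc_bsg_event_new()           arm the unsolicited CT event
 *	diag_cmd_data_alloc()          build the transmit BDE chain
 *	lpfc_sli_issue_iocb_wait()     XMIT_SEQUENCE64 to ourselves
 *	wait_event_..._timeout()       wait for the echoed frame
 *	sg_copy_from_buffer()          return the payload to user space
 */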
/**
 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
 * @job: GET_DFC_REV fc_bsg_job
 **/
static int
lpfc_bsg_get_dfc_rev(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct get_mgmt_rev_reply *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2740 Received GET_DFC_REV request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply = (struct get_mgmt_rev_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2741 Received GET_DFC_REV reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
	bsg_reply->result = rc;
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
/**
 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_bsg_issue_mbox function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by context1
 * of the mailbox.
 **/
static void
lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job *job;
	uint32_t size;
	unsigned long flags;
	uint8_t *pmb, *pmb_buf;

	dd_data = pmboxq->context1;

	/*
	 * The outgoing buffer is readily referred from the dma buffer,
	 * just need to get header part from mailboxq structure.
	 */
	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));

	/* Determine if job has been aborted */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the mailbox data to the job if it is still active */

	if (job) {
		bsg_reply = job->reply;
		size = job->reply_payload.payload_len;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);
	}

	dd_data->set_job = NULL;
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = 0;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_READ_TOPOLOGY:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1226 mbox: set_variable 0x%x, 0x%x\n",
			mb->un.varWords[0],
			mb->un.varWords[1]);
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
			&& (mb->un.varWords[1] == 1)) {
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
		}
		break;
	case MBX_READ_SPARM64:
	case MBX_READ_LA64:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}
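
/*
 * Usage note (editor's addition): lpfc_bsg_check_cmd_access() is a
 * gatekeeper meant to run before a user-supplied mailbox command is
 * issued; a caller pattern consistent with its contract would be:
 *
 *	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
 *	if (rc)
 *		return rc;	(negative errno, job rejected)
 *	... copy the mailbox into an LPFC_MBOXQ_t and issue it ...
 *
 * Note the deliberate fall-through from the offline-only case labels
 * into the always-allowed group once the offline check has passed.
 */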
/**
 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
 * @phba: Pointer to HBA context object.
 *
 * This routine cleans up and resets the BSG handling of a multi-buffer mbox
 * command session.
 **/
static void
lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
		return;

	/* free all memory, including dma buffers */
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
	/* multi-buffer write mailbox command pass-through complete */
	memset((char *)&phba->mbox_ext_buf_ctx, 0,
	       sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	return;
}
/**
 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This routine handles the BSG job for mailbox command completions with
 * multiple external buffers.
 **/
static struct bsg_job *
lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	uint8_t *pmb, *pmb_buf;
	unsigned long flags;
	uint32_t size;
	int rc = 0;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint8_t *pmbx;

	dd_data = pmboxq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/*
	 * The outgoing buffer is readily referred from the dma buffer,
	 * just need to get header part from mailboxq structure.
	 */

	pmb = (uint8_t *)&pmboxq->u.mb;
	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
	/* Copy the byte swapped response mailbox back to the user */
	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
	/* if there is any non-embedded extended data copy that too */
	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		pmbx = (uint8_t *)dmabuf->virt;
		/* byte swap the extended data following the mailbox command */
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
	}

	/* Complete the job if the job is still active */

	if (job) {
		size = job->reply_payload.payload_len;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmb_buf, size);

		/* result for successful */
		bsg_reply->result = 0;

		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2937 SLI_CONFIG ext-buffer mailbox command "
				"(x%x/x%x) complete bsg job done, bsize:%d\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, size);
		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
					phba->mbox_ext_buf_ctx.nembType,
					phba->mbox_ext_buf_ctx.mboxType,
					dma_ebuf, sta_pos_addr,
					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2938 SLI_CONFIG ext-buffer mailbox "
				"command (x%x/x%x) failure, rc:x%x\n",
				phba->mbox_ext_buf_ctx.nembType,
				phba->mbox_ext_buf_ctx.mboxType, rc);
	}

	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
	kfree(dd_data);
	return job;
}
/**
 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox read commands with
 * multiple external buffers.
 **/
static void
lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;

	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);

	/* handle the BSG job with mailbox command */
	if (!job)
		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2939 SLI_CONFIG ext-buffer rd mailbox command "
			"complete, ctxState:x%x, mbxStatus:x%x\n",
			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);

	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
		lpfc_bsg_mbox_ext_session_reset(phba);

	/* free base driver mailbox structure memory */
	mempool_free(pmboxq, phba->mbox_mem_pool);

	/* if the job is still active, call job done */
	if (job) {
		bsg_reply = job->reply;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}
/**
 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox write commands with
 * multiple external buffers.
 **/
static void
lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;

	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);

	/* handle the BSG job with the mailbox command */
	if (!job)
		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2940 SLI_CONFIG ext-buffer wr mailbox command "
			"complete, ctxState:x%x, mbxStatus:x%x\n",
			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);

	/* free all memory, including dma buffers */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_mbox_ext_session_reset(phba);

	/* if the job is still active, call job done */
	if (job) {
		bsg_reply = job->reply;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}

	return;
}
/**
 * lpfc_bsg_sli_cfg_dma_desc_setup - set up an SLI_CONFIG dma buffer descriptor
 * @phba: Pointer to HBA context object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @index: Index of the external buffer descriptor to set up.
 * @mbx_dmabuf: Pointer to the dma buffer holding the mailbox command.
 * @ext_dmabuf: Pointer to the dma buffer of the external buffer.
 *
 * This routine writes the physical address of the external buffer selected
 * by @index into the corresponding MSE or HBD descriptor of the SLI_CONFIG
 * mailbox command and logs the resulting descriptor.
 **/
static void
lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
				struct lpfc_dmabuf *ext_dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		if (index == 0) {
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2943 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		} else {
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2944 SLI_CONFIG(mse)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].buf_len,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb0_subsys.
					mse[index].pa_lo);
		}
	} else {
		if (index == 0) {
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(mbx_dmabuf->phys +
					     sizeof(MAILBOX_t));
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(mbx_dmabuf->phys +
					    sizeof(MAILBOX_t));
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3007 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
					&sli_cfg_mbx->un.
					sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);
		} else {
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_hi =
				putPaddrHigh(ext_dmabuf->phys);
			sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[index].pa_lo =
				putPaddrLow(ext_dmabuf->phys);
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"3008 SLI_CONFIG(hbd)[%d], "
					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
					index,
					bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
					&sli_cfg_mbx->un.
					sli_config_emb1_subsys.hbd[index]),
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_hi,
					sli_cfg_mbx->un.sli_config_emb1_subsys.
					hbd[index].pa_lo);
		}
	}
	return;
}
/**
 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
 * non-embedded external buffers.
 **/
static int
lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			      enum nemb_type nemb_tp,
			      struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
	uint32_t ext_buf_cnt, ext_buf_index;
	struct lpfc_dmabuf *ext_dmabuf = NULL;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *pmbx;
	int rc, i;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2945 Handled SLI_CONFIG(mse) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2941 Handled SLI_CONFIG(mse) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
		    LPFC_SLI_INTF_IF_TYPE_2) {
			rc = -ENODEV;
			goto job_error;
		}
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2946 Handled SLI_CONFIG(hbd) rd, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			rc = -ERANGE;
			goto job_error;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2942 Handled SLI_CONFIG(hbd) rd, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	/* reject non-embedded mailbox command with no external buffer */
	if (ext_buf_cnt == 0) {
		rc = -EPERM;
		goto job_error;
	} else if (ext_buf_cnt > 1) {
		/* additional external read buffers */
		for (i = 1; i < ext_buf_cnt; i++) {
			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
			if (!ext_dmabuf) {
				rc = -ENOMEM;
				goto job_error;
			}
			list_add_tail(&ext_dmabuf->list,
				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
		}
	}

	/* bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	/* mailbox command structure for base driver */
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* for the rest of external buffer descriptors if any */
	if (ext_buf_cnt > 1) {
		ext_buf_index = 1;
		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
						ext_buf_index, dmabuf,
						curr_dmabuf);
			ext_buf_index++;
		}
	}

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* construct base driver mbox command */
	pmb = &pmboxq->u.mb;
	pmbx = (uint8_t *)dmabuf->virt;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = phba->pport;

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	/* callback for multi-buffer read mailbox command */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;

	/* context fields to callback function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	job->dd_data = dd_data;

	/* state change */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

	/*
	 * Non-embedded mailbox subcommand data gets byte swapped here because
	 * the lower level driver code only does the first 64 mailbox words.
	 */
	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
		(nemb_tp == nemb_mse))
		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
			&pmbx[sizeof(MAILBOX_t)],
			sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[0].buf_len);

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2947 Issued SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		return SLI_CONFIG_HANDLED;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2948 Failed to issue SLI_CONFIG ext-buffer "
			"mailbox command, rc:x%x\n", rc);
	rc = -EPIPE;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_list_free(phba,
				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
	kfree(dd_data);
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
	return rc;
}
/**
 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
 * non-embedded external buffers.
 **/
static int
lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
			       enum nemb_type nemb_tp,
			       struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct dfc_mbox_req *mbox_req;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t ext_buf_cnt;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	uint8_t *mbx;
	int rc = SLI_CONFIG_NOT_HANDLED, i;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* pointer to the start of mailbox command */
	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (nemb_tp == nemb_mse) {
		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2953 Failed SLI_CONFIG(mse) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_MSE);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2949 Handled SLI_CONFIG(mse) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	} else {
		/* sanity check on interface type for support */
		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
		    LPFC_SLI_INTF_IF_TYPE_2)
			return -ENODEV;
		/* nemb_tp == nemb_hbd */
		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2954 Failed SLI_CONFIG(hbd) wr, "
					"ext_buf_cnt(%d) out of range(%d)\n",
					ext_buf_cnt,
					LPFC_MBX_SLI_CONFIG_MAX_HBD);
			return -ERANGE;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2950 Handled SLI_CONFIG(hbd) wr, "
				"ext_buf_cnt:%d\n", ext_buf_cnt);
	}

	/* before dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pre_addr, dmabuf, ext_buf_cnt);

	if (ext_buf_cnt == 0)
		return -EPERM;

	/* for the first external buffer */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);

	/* after dma descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
					sta_pos_addr, dmabuf, ext_buf_cnt);

	/* log for looking forward */
	for (i = 1; i < ext_buf_cnt; i++) {
		if (nemb_tp == nemb_mse)
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
				mse[i].buf_len);
		else
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
				&sli_cfg_mbx->un.sli_config_emb1_subsys.
				hbd[i]));
	}

	/* multi-buffer handling context */
	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;

	if (ext_buf_cnt == 1) {
		/* bsg tracking structure */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (!dd_data) {
			rc = -ENOMEM;
			goto job_error;
		}

		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pmb = &pmboxq->u.mb;
		mbx = (uint8_t *)dmabuf->virt;
		memcpy(pmb, mbx, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
		job->dd_data = dd_data;

		/* state change */

		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2955 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2956 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(dd_data);

	return rc;
}
4176 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4177 * @phba: Pointer to HBA context object.
4178 * @mb: Pointer to a BSG mailbox object.
4179 * @dmabuff: Pointer to a DMA buffer descriptor.
4181 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4182 * external bufffers, including both 0x9B with non-embedded MSEs and 0x9B
4183 * with embedded sussystem 0x1 and opcodes with external HBDs.
static int
lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	uint32_t subsys;
	uint32_t opcode;
	int rc = SLI_CONFIG_NOT_HANDLED;

	/* state change on new multi-buffer pass-through mailbox command */
	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;

	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
			switch (opcode) {
			case FCOE_OPCODE_READ_FCF:
			case FCOE_OPCODE_GET_DPORT_RESULTS:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2957 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			case FCOE_OPCODE_ADD_FCF:
			case FCOE_OPCODE_SET_DPORT_MODE:
			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2958 Handled SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2959 Reject SLI_CONFIG "
						"subsys_fcoe, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
			case COMN_OPCODE_GET_PROFILE_CONFIG:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3106 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_mse, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"3107 Reject SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = -EPERM;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2977 Reject SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = -EPERM;
		}
	} else {
		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
			switch (opcode) {
			case COMN_OPCODE_READ_OBJECT:
			case COMN_OPCODE_READ_OBJECT_LIST:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2960 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			case COMN_OPCODE_WRITE_OBJECT:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2961 Handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
							nemb_hbd, dmabuf);
				break;
			default:
				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
						"2962 Not handled SLI_CONFIG "
						"subsys_comn, opcode:x%x\n",
						opcode);
				rc = SLI_CONFIG_NOT_HANDLED;
				break;
			}
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2978 Not handled SLI_CONFIG "
					"subsys:x%d, opcode:x%x\n",
					subsys, opcode);
			rc = SLI_CONFIG_NOT_HANDLED;
		}
	}

	/* state reset on not handled new multi-buffer mailbox command */
	if (rc != SLI_CONFIG_HANDLED)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;

	return rc;
}
/**
 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
 * @phba: Pointer to HBA context object.
 *
 * This routine is for requesting to abort a pass-through mailbox command with
 * multiple external buffers due to error condition.
 **/
static void
lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
{
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
	else
		lpfc_bsg_mbox_ext_session_reset(phba);
}
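
/*
 * When the port still owns the mailbox (LPFC_BSG_MBOX_PORT), the session
 * is only flagged LPFC_BSG_MBOX_ABTS here so that the mailbox completion
 * path can finish the teardown; otherwise the session can be reset
 * immediately.
 */
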
/**
 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 *
 * This routine copies the next mailbox read external buffer back to
 * user space through BSG.
 **/
static int
lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_sli_config_mbox *sli_cfg_mbx;
	struct lpfc_dmabuf *dmabuf;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;

	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2963 SLI_CONFIG (mse) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	} else {
		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
				"buffer[%d], size:%d\n", index, size);
	}

	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
		return -EPIPE;
	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
				  struct lpfc_dmabuf, list);
	list_del_init(&dmabuf->list);

	/* after dma buffer descriptor setup */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_rd, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	pbuf = (uint8_t *)dmabuf->virt;
	bsg_reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    pbuf, size);

	lpfc_bsg_dma_page_free(phba, dmabuf);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
				"command session done\n");
		lpfc_bsg_mbox_ext_session_reset(phba);
	}

	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);

	return SLI_CONFIG_HANDLED;
}
/**
 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine sets up the next mailbox write external buffer obtained
 * from user space through BSG.
 **/
static int
lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
			struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct bsg_job_data *dd_data = NULL;
	LPFC_MBOXQ_t *pmboxq = NULL;
	MAILBOX_t *pmb;
	enum nemb_type nemb_tp;
	uint8_t *pbuf;
	uint32_t size;
	uint32_t index;
	int rc;

	index = phba->mbox_ext_buf_ctx.seqNum;
	phba->mbox_ext_buf_ctx.seqNum++;
	nemb_tp = phba->mbox_ext_buf_ctx.nembType;

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		rc = -ENOMEM;
		goto job_error;
	}

	pbuf = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  pbuf, size);

	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2966 SLI_CONFIG (mse) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
				"buffer[%d], size:%d\n",
				phba->mbox_ext_buf_ctx.seqNum, size);
	}

	/* set up external buffer descriptor and add to external buffer list */
	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
					phba->mbox_ext_buf_ctx.mbx_dmabuf,
					dmabuf);
	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	/* after write dma buffer */
	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
					mbox_wr, dma_ebuf, sta_pos_addr,
					dmabuf, index);

	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2968 SLI_CONFIG ext-buffer wr all %d "
				"ebuffers received\n",
				phba->mbox_ext_buf_ctx.numBuf);
		/* mailbox command structure for base driver */
		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!pmboxq) {
			rc = -ENOMEM;
			goto job_error;
		}
		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
		pmb = &pmboxq->u.mb;
		memcpy(pmb, pbuf, sizeof(*pmb));
		pmb->mbxOwner = OWN_HOST;
		pmboxq->vport = phba->pport;

		/* callback for multi-buffer write mailbox command */
		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;

		/* context fields to callback function */
		pmboxq->context1 = dd_data;
		dd_data->type = TYPE_MBOX;
		dd_data->set_job = job;
		dd_data->context_un.mbox.pmboxq = pmboxq;
		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
		job->dd_data = dd_data;

		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;

		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2969 Issued SLI_CONFIG ext-buffer "
					"mailbox command, rc:x%x\n", rc);
			return SLI_CONFIG_HANDLED;
		}
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2970 Failed to issue SLI_CONFIG ext-buffer "
				"mailbox command, rc:x%x\n", rc);
		rc = -EPIPE;
		goto job_error;
	}

	/* wait for additional external buffers */
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return SLI_CONFIG_HANDLED;

job_error:
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

	return rc;
}
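
/*
 * The write session is the mirror image of the read path: buffers are
 * staged on ext_dmabuf_list one BSG request at a time, and the mailbox
 * command is only issued to the port once the last expected buffer
 * (seqNum == numBuf) has arrived.
 */
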
/**
 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
 * command with multiple non-embedded external buffers.
 **/
static int
lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
			     struct lpfc_dmabuf *dmabuf)
{
	int rc;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2971 SLI_CONFIG buffer (type:x%x)\n",
			phba->mbox_ext_buf_ctx.mboxType);

	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2972 SLI_CONFIG rd buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_read_ebuf_get(phba, job);
		if (rc == SLI_CONFIG_HANDLED)
			lpfc_bsg_dma_page_free(phba, dmabuf);
	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
					"2973 SLI_CONFIG wr buffer state "
					"mismatch:x%x\n",
					phba->mbox_ext_buf_ctx.state);
			lpfc_bsg_mbox_ext_abort(phba);
			return -EPIPE;
		}
		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
	}

	return rc;
}
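
/*
 * A buffer arriving in the wrong session state is treated as fatal for
 * the session: the abort request above marks or resets the context and
 * the caller reports a broken pipe to the application.
 */
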
/**
 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
 * (0x9B) mailbox commands and external buffers.
 **/
static int
lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
			    struct lpfc_dmabuf *dmabuf)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct dfc_mbox_req *mbox_req;
	int rc = SLI_CONFIG_NOT_HANDLED;

	mbox_req =
	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* mbox command with/without single external buffer */
	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
		return rc;

	/* mbox command and first external buffer */
	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
		if (mbox_req->extSeqNum == 1) {
			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
					"2974 SLI_CONFIG mailbox: tag:%d, "
					"seq:%d\n", mbox_req->extMboxTag,
					mbox_req->extSeqNum);
			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
			return rc;
		}
		goto sli_cfg_ext_error;
	}

	/*
	 * handle additional external buffers
	 */

	/* check broken pipe conditions */
	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
		goto sli_cfg_ext_error;
	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
		goto sli_cfg_ext_error;

	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"2975 SLI_CONFIG mailbox external buffer: "
			"extSta:x%x, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
			mbox_req->extSeqNum);
	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
	return rc;

sli_cfg_ext_error:
	/* all other cases, broken pipe */
	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
			"2976 SLI_CONFIG mailbox broken pipe: "
			"ctxSta:x%x, ctxNumBuf:%d "
			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
			phba->mbox_ext_buf_ctx.state,
			phba->mbox_ext_buf_ctx.numBuf,
			phba->mbox_ext_buf_ctx.mbxTag,
			phba->mbox_ext_buf_ctx.seqNum,
			mbox_req->extMboxTag, mbox_req->extSeqNum);

	lpfc_bsg_mbox_ext_session_reset(phba);

	return -EPIPE;
}
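
/*
 * Sequencing contract with user space, as enforced above: the request
 * that carries the mailbox command itself uses extSeqNum == 1; every
 * follow-on buffer must repeat the session's extMboxTag and present
 * extSeqNum exactly one greater than the last buffer accepted.  Any
 * other combination is logged as a broken pipe and the session is
 * torn down.
 */
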
/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object, mailbox command memory, get a mailbox
 * from the mailbox pool, copy the caller mailbox command.
 *
 * If the port is offline or the SLI is not active, poll for the command
 * (the port is being reset) and complete the job; otherwise issue the
 * mailbox command and let the completion handler finish the command.
 **/
static int
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
	struct lpfc_vport *vport)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
	/* a 4k buffer to hold the mb and extended data from/to the bsg */
	uint8_t *pmbx = NULL;
	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
	struct lpfc_dmabuf *dmabuf = NULL;
	struct dfc_mbox_req *mbox_req;
	struct READ_EVENT_LOG_VAR *rdEventLog;
	uint32_t transmit_length, receive_length, mode;
	struct lpfc_mbx_sli4_config *sli4_config;
	struct lpfc_mbx_nembed_cmd *nembed_sge;
	struct ulp_bde64 *bde;
	uint8_t *ext = NULL;
	int rc = 0;
	uint8_t *from;
	uint32_t size;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* sanity check to protect driver */
	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
		rc = -ERANGE;
		goto job_done;
	}

	/*
	 * Don't allow mailbox commands to be sent when blocked or when in
	 * the middle of discovery
	 */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_done;
	}

	mbox_req =
	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* check if requested extended data lengths are valid */
	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
		rc = -ERANGE;
		goto job_done;
	}

	dmabuf = lpfc_bsg_dma_page_alloc(phba);
	if (!dmabuf || !dmabuf->virt) {
		rc = -ENOMEM;
		goto job_done;
	}

	/* Get the mailbox command or external buffer from BSG */
	pmbx = (uint8_t *)dmabuf->virt;
	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, pmbx, size);

	/* Handle possible SLI_CONFIG with non-embedded payloads */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
		if (rc == SLI_CONFIG_HANDLED)
			goto job_cont;
		if (rc)
			goto job_done;
		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
	}

	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
	if (rc != 0)
		goto job_done; /* must be negative */

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_done;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_done;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	pmb = &pmboxq->u.mb;
	memcpy(pmb, pmbx, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = vport;

	/* If HBA encountered an error attention, allow only DUMP
	 * or RESTART mailbox commands until the HBA is restarted.
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		from = pmbx;
		ext = from + sizeof(MAILBOX_t);
		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}

	/* biu diag will need a kernel buffer to transfer the data
	 * allocate our own buffer and setup the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		transmit_length = pmb->un.varWords[1];
		receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
		    (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
			rc = -ERANGE;
			goto job_done;
		}
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		rdEventLog = &pmb->un.varRdEventLog;
		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
							+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
							+ sizeof(MAILBOX_t));
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Let type 4 (well known data) through because the data is
		 * returned in varwords[4-8]
		 * otherwise check the receive length and fetch the buffer addr
		 */
		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
		    (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			receive_length = pmb->un.varWords[2];
			/* receive length cannot be greater than mailbox
			 * extension size
			 */
			if (receive_length == 0) {
				rc = -ERANGE;
				goto job_done;
			}
			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			   pmb->un.varUpdateCfg.co) {
			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize >
			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
				rc = -ERANGE;
				goto job_done;
			}
			bde->addrHigh = putPaddrHigh(dmabuf->phys
						+ sizeof(MAILBOX_t));
			bde->addrLow = putPaddrLow(dmabuf->phys
						+ sizeof(MAILBOX_t));
		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
			/* Handling non-embedded SLI_CONFIG mailbox command */
			sli4_config = &pmboxq->u.mqe.un.sli4_config;
			if (!bf_get(lpfc_mbox_hdr_emb,
			    &sli4_config->header.cfg_mhdr)) {
				/* rebuild the command for sli4 using our
				 * own buffers like we do for biu diags
				 */
				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
						&pmb->un.varWords[0];
				receive_length = nembed_sge->sge[0].length;

				/* receive length cannot be greater than
				 * mailbox extension size
				 */
				if ((receive_length == 0) ||
				    (receive_length >
				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
					rc = -ERANGE;
					goto job_done;
				}

				nembed_sge->sge[0].pa_hi =
						putPaddrHigh(dmabuf->phys
						   + sizeof(MAILBOX_t));
				nembed_sge->sge[0].pa_lo =
						putPaddrLow(dmabuf->phys
						   + sizeof(MAILBOX_t));
			}
		}
	}

	dd_data->context_un.mbox.dmabuffers = dmabuf;

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;

	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->set_job = job;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
	dd_data->context_un.mbox.ext = ext;
	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
	job->dd_data = dd_data;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
			goto job_done;
		}

		/* job finished, copy the data */
		memcpy(pmbx, pmb, sizeof(*pmb));
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    pmbx, size);
		/* not waiting mbox already done */
		rc = 0;
		goto job_done;
	}

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
		return 1; /* job started */

job_done:
	/* common exit for error or job completed inline */
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	lpfc_bsg_dma_page_free(phba, dmabuf);
	kfree(dd_data);

job_cont:
	return rc;
}
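
/*
 * Two issue paths are used above: when the port is offline or the SLI is
 * not active, the mailbox is polled (MBX_POLL) and the job completes
 * inline; otherwise it is issued MBX_NOWAIT and lpfc_bsg_issue_mbox_cmpl
 * finishes the job asynchronously.
 */
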
/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
 **/
static int
lpfc_bsg_mbox_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct dfc_mbox_req *mbox_req;
	int rc = 0;

	/* mix-and-match backward compatibility */
	bsg_reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
				"2737 Mix-and-match backward compatibility "
				"between MBOX_REQ old size:%d and "
				"new request size:%d\n",
				(int)(job->request_len -
				      sizeof(struct fc_bsg_request)),
				(int)sizeof(struct dfc_mbox_req));
		mbox_req = (struct dfc_mbox_req *)
				bsg_request->rqst_data.h_vendor.vendor_cmd;
		mbox_req->extMboxTag = 0;
		mbox_req->extSeqNum = 0;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);
	if (rc == 0) {
		/* job done */
		bsg_reply->result = 0;
		job->dd_data = NULL;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	} else if (rc == 1)
		/* job submitted, will complete later */
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		bsg_reply->result = rc;
		job->dd_data = NULL;
	}

	return rc;
}
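
/*
 * lpfc_bsg_issue_mbox() returns 0 when the job completed inline, 1 when
 * the mailbox was started and will complete through its callback, and a
 * negative errno on failure; only the error case leaves a result for
 * user space without calling bsg_job_done() here.
 */
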
/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using the
 * lpfc_menlo_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	menlo = &dd_data->context_un.menlo;
	rmp = menlo->rmp;
	rsp = &rspiocbq->iocb;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job  */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Copy the job data or set the failing status for the job */

	if (job) {
		/* always return the xri, this would be used in the case
		 * of a menlo download to allow the data to be sent as a
		 * continuation of the exchange.
		 */

		menlo_resp = (struct menlo_response *)
			bsg_reply->reply_data.vendor_reply.vendor_rsp;
		menlo_resp->xri = rsp->ulpContext;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	kfree(dd_data);

	/* Complete the job if active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
}
/**
 * lpfc_menlo_cmd - send an ioctl for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the exchange
 * supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
		sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *)bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_rmp;
	}

	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* Limited Edition */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->ulpPU = 1;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->set_job = job;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rmp = rmp;
	job->dd_data = dd_data;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
		MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
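
/*
 * LPFC_BSG_VENDOR_MENLO_CMD opens a new exchange (GEN_REQUEST64_CR to
 * the well-known Menlo DID), while LPFC_BSG_VENDOR_MENLO_DATA continues
 * an existing one (GEN_REQUEST64_CX against the XRI returned by the
 * earlier command), which is why every completion hands the XRI back to
 * user space.
 */
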
static int
lpfc_forced_link_speed(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct forced_link_speed_support_reply *forced_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct get_forced_link_speed_support)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"0048 Received FORCED_LINK_SPEED request "
				"below minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	forced_reply = (struct forced_link_speed_support_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct forced_link_speed_support_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"0049 Received FORCED_LINK_SPEED reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
				   ? LPFC_FORCED_LINK_SPEED_SUPPORTED
				   : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
job_error:
	bsg_reply->result = rc;
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}
/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_loopback_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE_END:
		rc = lpfc_sli4_bsg_diag_mode_end(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
		rc = lpfc_bsg_diag_loopback_run(job);
		break;
	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
		rc = lpfc_sli4_bsg_link_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
		rc = lpfc_forced_link_speed(job);
		break;
	default:
		rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		bsg_reply->result = rc;
		break;
	}

	return rc;
}
/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t msgcode;
	int rc;

	msgcode = bsg_request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		bsg_reply->result = rc;
		break;
	}

	return rc;
}
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB.  The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace.
 **/
*job
)
5413 struct lpfc_vport
*vport
= shost_priv(fc_bsg_to_shost(job
));
5414 struct lpfc_hba
*phba
= vport
->phba
;
5415 struct lpfc_iocbq
*cmdiocb
;
5416 struct lpfc_sli_ring
*pring
;
5417 struct bsg_job_data
*dd_data
;
5418 unsigned long flags
;
5420 LIST_HEAD(completions
);
5421 struct lpfc_iocbq
*check_iocb
, *next_iocb
;
5423 pring
= lpfc_phba_elsring(phba
);
5424 if (unlikely(!pring
))
5427 /* if job's driver data is NULL, the command completed or is in the
5428 * the process of completing. In this case, return status to request
5429 * so the timeout is retried. This avoids double completion issues
5430 * and the request will be pulled off the timer queue when the
5431 * command's completion handler executes. Otherwise, prevent the
5432 * command's completion handler from executing the job done callback
5433 * and continue processing to abort the outstanding the command.
5436 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
5437 dd_data
= (struct bsg_job_data
*)job
->dd_data
;
5439 dd_data
->set_job
= NULL
;
5440 job
->dd_data
= NULL
;
5442 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
5446 switch (dd_data
->type
) {
5448 /* Check to see if IOCB was issued to the port or not. If not,
5449 * remove it from the txq queue and call cancel iocbs.
5450 * Otherwise, call abort iotag
5452 cmdiocb
= dd_data
->context_un
.iocb
.cmdiocbq
;
5453 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
5455 spin_lock_irqsave(&phba
->hbalock
, flags
);
5456 /* make sure the I/O abort window is still open */
5457 if (!(cmdiocb
->iocb_flag
& LPFC_IO_CMD_OUTSTANDING
)) {
5458 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
5461 list_for_each_entry_safe(check_iocb
, next_iocb
, &pring
->txq
,
5463 if (check_iocb
== cmdiocb
) {
5464 list_move_tail(&check_iocb
->list
, &completions
);
5468 if (list_empty(&completions
))
5469 lpfc_sli_issue_abort_iotag(phba
, pring
, cmdiocb
);
5470 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
5471 if (!list_empty(&completions
)) {
5472 lpfc_sli_cancel_iocbs(phba
, &completions
,
5473 IOSTAT_LOCAL_REJECT
,
5479 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
5483 /* Update the ext buf ctx state if needed */
5485 if (phba
->mbox_ext_buf_ctx
.state
== LPFC_BSG_MBOX_PORT
)
5486 phba
->mbox_ext_buf_ctx
.state
= LPFC_BSG_MBOX_ABTS
;
5487 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
5490 /* Check to see if IOCB was issued to the port or not. If not,
5491 * remove it from the txq queue and call cancel iocbs.
5492 * Otherwise, call abort iotag.
5494 cmdiocb
= dd_data
->context_un
.menlo
.cmdiocbq
;
5495 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
5497 spin_lock_irqsave(&phba
->hbalock
, flags
);
5498 list_for_each_entry_safe(check_iocb
, next_iocb
, &pring
->txq
,
5500 if (check_iocb
== cmdiocb
) {
5501 list_move_tail(&check_iocb
->list
, &completions
);
5505 if (list_empty(&completions
))
5506 lpfc_sli_issue_abort_iotag(phba
, pring
, cmdiocb
);
5507 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
5508 if (!list_empty(&completions
)) {
5509 lpfc_sli_cancel_iocbs(phba
, &completions
,
5510 IOSTAT_LOCAL_REJECT
,
5515 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
5519 /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
5520 * otherwise an error message will be displayed on the console
5521 * so always return success (zero)