/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for
 * Fibre Channel Host Bus Adapters.
 * Copyright (C) 2009-2011 Emulex.  All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General
 * Public License as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD
 * TO BE LEGALLY INVALID.  See the GNU General Public License for
 * more details, a copy of which can be found in the file COPYING
 * included with this package.
 *******************************************************************/
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
struct lpfc_bsg_event {
        struct list_head node;
        struct kref kref;
        wait_queue_head_t wq;

        /* Event type and waiter identifiers */
        uint32_t type_mask;
        uint32_t req_id;
        uint32_t reg_id;

        /* next two flags are here for the auto-delete logic */
        unsigned long wait_time_stamp;
        int waiting;

        /* seen and not seen events */
        struct list_head events_to_get;
        struct list_head events_to_see;

        /* job waiting for this event to finish */
        struct fc_bsg_job *set_job;
};
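/*
 * New unsolicited event data is first queued on events_to_see and is later
 * moved to events_to_get, from which a GET_EVENT request (or the diagnostic
 * loopback waiter) retrieves it.
 */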
struct lpfc_bsg_iocb {
        struct lpfc_iocbq *cmdiocbq;
        struct lpfc_iocbq *rspiocbq;
        struct lpfc_dmabuf *bmp;
        struct lpfc_nodelist *ndlp;

        /* job waiting for this iocb to finish */
        struct fc_bsg_job *set_job;
};
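/*
 * lpfc_bsg_iocb tracks the resources tied to a single pass-through command
 * iocb so the completion (or timeout) handler can unmap the payloads, free
 * the buffer list and complete the waiting fc_bsg_job.
 */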
struct lpfc_bsg_mbox {
        struct lpfc_dmabuf *rxbmp;      /* for BIU diags */
        struct lpfc_dmabufext *dmp;     /* for BIU diags */
        uint8_t *ext;                   /* extended mailbox data */
        uint32_t mbOffset;              /* from app */
        uint32_t inExtWLen;             /* from app */
        uint32_t outExtWLen;            /* from app */

        /* job waiting for this mbox command to finish */
        struct fc_bsg_job *set_job;
};
#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
        struct lpfc_iocbq *cmdiocbq;
        struct lpfc_iocbq *rspiocbq;
        struct lpfc_dmabuf *bmp;

        /* job waiting for this iocb to finish */
        struct fc_bsg_job *set_job;
};
struct bsg_job_data {
        uint32_t type;
        union {
                struct lpfc_bsg_event *evt;
                struct lpfc_bsg_iocb iocb;
                struct lpfc_bsg_mbox mbox;
                struct lpfc_bsg_menlo menlo;
        } context_un;
};
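/*
 * One bsg_job_data is allocated per outstanding bsg request.  It is stored
 * in job->dd_data and in the command context so that the completion handler
 * and the transport timeout handler can both find the waiting fc_bsg_job.
 */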
struct event_data {
        struct list_head node;
        uint32_t type;
        uint32_t immed_dat;
        void *data;
        uint32_t len;
};
#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
        ELX_LOOPBACK_XRI_SETUP,
        ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
        (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
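/*
 * ELX_LOOPBACK_HEADER_SZ evaluates to the byte offset of the 'un' member of
 * struct lpfc_sli_ct_request, i.e. the size of the CT request preamble that
 * precedes the command-specific payload.
 */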
struct lpfc_dmabufext {
        struct lpfc_dmabuf dma;
        uint32_t size;
        uint32_t flag;
};
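/*
 * lpfc_dmabufext wraps an lpfc_dmabuf with its payload length and a flag
 * word (on the head of a chain the flag holds the buffer count);
 * diag_cmd_data_alloc() builds chains of these for the loopback buffers.
 */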
/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function can also be called from another thread
 * which cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
{
        struct bsg_job_data *dd_data;
        struct fc_bsg_job *job;
        IOCB_t *rsp;
        struct lpfc_dmabuf *bmp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_bsg_iocb *iocb;
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        dd_data = cmdiocbq->context2;
        if (!dd_data) {
                spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
                lpfc_sli_release_iocbq(phba, cmdiocbq);
                return;
        }

        iocb = &dd_data->context_un.iocb;
        job = iocb->set_job;
        job->dd_data = NULL; /* so timeout handler does not reply */

        bmp = iocb->bmp;
        rsp = &rspiocbq->iocb;
        ndlp = cmdiocbq->context1;

        pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
                     job->request_payload.sg_cnt, DMA_TO_DEVICE);
        pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
                     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

        if (rsp->ulpStatus) {
                if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
                        switch (rsp->un.ulpWord[4] & 0xff) {
                        case IOERR_SEQUENCE_TIMEOUT:
                                rc = -ETIMEDOUT;
                                break;
                        case IOERR_INVALID_RPI:
                                rc = -EFAULT;
                                break;
                        default:
                                rc = -EACCES;
                                break;
                        }
                } else
                        rc = -EACCES;
        } else
                job->reply->reply_payload_rcv_len =
                        rsp->un.genreq64.bdl.bdeSize;

        lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
        lpfc_sli_release_iocbq(phba, cmdiocbq);
        lpfc_nlp_put(ndlp);
        kfree(bmp);
        kfree(dd_data);
        /* make error code available to userspace */
        job->reply->result = rc;
        /* complete the job back to userspace */
        job->job_done(job);
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
        return;
}
/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
232 lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job
*job
)
234 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
235 struct lpfc_hba
*phba
= vport
->phba
;
236 struct lpfc_rport_data
*rdata
= job
->rport
->dd_data
;
237 struct lpfc_nodelist
*ndlp
= rdata
->pnode
;
238 struct ulp_bde64
*bpl
= NULL
;
240 struct lpfc_iocbq
*cmdiocbq
= NULL
;
242 struct lpfc_dmabuf
*bmp
= NULL
;
245 struct scatterlist
*sgel
= NULL
;
248 struct bsg_job_data
*dd_data
;
253 /* in case no data is transferred */
254 job
->reply
->reply_payload_rcv_len
= 0;
256 /* allocate our bsg tracking structure */
257 dd_data
= kmalloc(sizeof(struct bsg_job_data
), GFP_KERNEL
);
259 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
260 "2733 Failed allocation of dd_data\n");
265 if (!lpfc_nlp_get(ndlp
)) {
270 bmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
276 if (ndlp
->nlp_flag
& NLP_ELS_SND_MASK
) {
281 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
287 cmd
= &cmdiocbq
->iocb
;
288 bmp
->virt
= lpfc_mbuf_alloc(phba
, 0, &bmp
->phys
);
294 INIT_LIST_HEAD(&bmp
->list
);
295 bpl
= (struct ulp_bde64
*) bmp
->virt
;
296 request_nseg
= pci_map_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
297 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
298 for_each_sg(job
->request_payload
.sg_list
, sgel
, request_nseg
, numbde
) {
299 busaddr
= sg_dma_address(sgel
);
300 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
301 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
302 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
303 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
304 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
308 reply_nseg
= pci_map_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
309 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
310 for_each_sg(job
->reply_payload
.sg_list
, sgel
, reply_nseg
, numbde
) {
311 busaddr
= sg_dma_address(sgel
);
312 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
313 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
314 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
315 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
316 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
320 cmd
->un
.genreq64
.bdl
.ulpIoTag32
= 0;
321 cmd
->un
.genreq64
.bdl
.addrHigh
= putPaddrHigh(bmp
->phys
);
322 cmd
->un
.genreq64
.bdl
.addrLow
= putPaddrLow(bmp
->phys
);
323 cmd
->un
.genreq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
324 cmd
->un
.genreq64
.bdl
.bdeSize
=
325 (request_nseg
+ reply_nseg
) * sizeof(struct ulp_bde64
);
326 cmd
->ulpCommand
= CMD_GEN_REQUEST64_CR
;
327 cmd
->un
.genreq64
.w5
.hcsw
.Fctl
= (SI
| LA
);
328 cmd
->un
.genreq64
.w5
.hcsw
.Dfctl
= 0;
329 cmd
->un
.genreq64
.w5
.hcsw
.Rctl
= FC_RCTL_DD_UNSOL_CTL
;
330 cmd
->un
.genreq64
.w5
.hcsw
.Type
= FC_TYPE_CT
;
331 cmd
->ulpBdeCount
= 1;
333 cmd
->ulpClass
= CLASS3
;
334 cmd
->ulpContext
= ndlp
->nlp_rpi
;
335 cmd
->ulpOwner
= OWN_CHIP
;
336 cmdiocbq
->vport
= phba
->pport
;
337 cmdiocbq
->context3
= bmp
;
338 cmdiocbq
->iocb_flag
|= LPFC_IO_LIBDFC
;
339 timeout
= phba
->fc_ratov
* 2;
340 cmd
->ulpTimeout
= timeout
;
342 cmdiocbq
->iocb_cmpl
= lpfc_bsg_send_mgmt_cmd_cmp
;
343 cmdiocbq
->context1
= ndlp
;
344 cmdiocbq
->context2
= dd_data
;
345 dd_data
->type
= TYPE_IOCB
;
346 dd_data
->context_un
.iocb
.cmdiocbq
= cmdiocbq
;
347 dd_data
->context_un
.iocb
.set_job
= job
;
348 dd_data
->context_un
.iocb
.bmp
= bmp
;
350 if (phba
->cfg_poll
& DISABLE_FCP_RING_INT
) {
351 if (lpfc_readl(phba
->HCregaddr
, &creg_val
)) {
355 creg_val
|= (HC_R0INT_ENA
<< LPFC_FCP_RING
);
356 writel(creg_val
, phba
->HCregaddr
);
357 readl(phba
->HCregaddr
); /* flush */
360 iocb_stat
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, cmdiocbq
, 0);
361 if (iocb_stat
== IOCB_SUCCESS
)
362 return 0; /* done for now */
363 else if (iocb_stat
== IOCB_BUSY
)
369 /* iocb failed so cleanup */
370 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
371 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
372 pci_unmap_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
373 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
375 lpfc_mbuf_free(phba
, bmp
->virt
, bmp
->phys
);
378 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
386 /* make error code available to userspace */
387 job
->reply
->result
= rc
;
/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function can also be called from another thread
 * which cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
410 lpfc_bsg_rport_els_cmp(struct lpfc_hba
*phba
,
411 struct lpfc_iocbq
*cmdiocbq
,
412 struct lpfc_iocbq
*rspiocbq
)
414 struct bsg_job_data
*dd_data
;
415 struct fc_bsg_job
*job
;
417 struct lpfc_nodelist
*ndlp
;
418 struct lpfc_dmabuf
*pbuflist
= NULL
;
419 struct fc_bsg_ctels_reply
*els_reply
;
424 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
425 dd_data
= cmdiocbq
->context1
;
426 /* normal completion and timeout crossed paths, already done */
428 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
432 cmdiocbq
->iocb_flag
|= LPFC_IO_WAKE
;
433 if (cmdiocbq
->context2
&& rspiocbq
)
434 memcpy(&((struct lpfc_iocbq
*)cmdiocbq
->context2
)->iocb
,
435 &rspiocbq
->iocb
, sizeof(IOCB_t
));
437 job
= dd_data
->context_un
.iocb
.set_job
;
438 cmdiocbq
= dd_data
->context_un
.iocb
.cmdiocbq
;
439 rspiocbq
= dd_data
->context_un
.iocb
.rspiocbq
;
440 rsp
= &rspiocbq
->iocb
;
441 ndlp
= dd_data
->context_un
.iocb
.ndlp
;
443 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
444 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
445 pci_unmap_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
446 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
448 if (job
->reply
->result
== -EAGAIN
)
450 else if (rsp
->ulpStatus
== IOSTAT_SUCCESS
)
451 job
->reply
->reply_payload_rcv_len
=
452 rsp
->un
.elsreq64
.bdl
.bdeSize
;
453 else if (rsp
->ulpStatus
== IOSTAT_LS_RJT
) {
454 job
->reply
->reply_payload_rcv_len
=
455 sizeof(struct fc_bsg_ctels_reply
);
456 /* LS_RJT data returned in word 4 */
457 rjt_data
= (uint8_t *)&rsp
->un
.ulpWord
[4];
458 els_reply
= &job
->reply
->reply_data
.ctels_reply
;
459 els_reply
->status
= FC_CTELS_STATUS_REJECT
;
460 els_reply
->rjt_data
.action
= rjt_data
[3];
461 els_reply
->rjt_data
.reason_code
= rjt_data
[2];
462 els_reply
->rjt_data
.reason_explanation
= rjt_data
[1];
463 els_reply
->rjt_data
.vendor_unique
= rjt_data
[0];
467 pbuflist
= (struct lpfc_dmabuf
*) cmdiocbq
->context3
;
468 lpfc_mbuf_free(phba
, pbuflist
->virt
, pbuflist
->phys
);
469 lpfc_sli_release_iocbq(phba
, rspiocbq
);
470 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
473 /* make error code available to userspace */
474 job
->reply
->result
= rc
;
476 /* complete the job back to userspace */
478 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
487 lpfc_bsg_rport_els(struct fc_bsg_job
*job
)
489 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
490 struct lpfc_hba
*phba
= vport
->phba
;
491 struct lpfc_rport_data
*rdata
= job
->rport
->dd_data
;
492 struct lpfc_nodelist
*ndlp
= rdata
->pnode
;
496 struct lpfc_iocbq
*rspiocbq
;
497 struct lpfc_iocbq
*cmdiocbq
;
500 struct lpfc_dmabuf
*pcmd
;
501 struct lpfc_dmabuf
*prsp
;
502 struct lpfc_dmabuf
*pbuflist
= NULL
;
503 struct ulp_bde64
*bpl
;
506 struct scatterlist
*sgel
= NULL
;
509 struct bsg_job_data
*dd_data
;
513 /* in case no data is transferred */
514 job
->reply
->reply_payload_rcv_len
= 0;
516 /* allocate our bsg tracking structure */
517 dd_data
= kmalloc(sizeof(struct bsg_job_data
), GFP_KERNEL
);
519 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
520 "2735 Failed allocation of dd_data\n");
525 if (!lpfc_nlp_get(ndlp
)) {
530 elscmd
= job
->request
->rqst_data
.r_els
.els_code
;
531 cmdsize
= job
->request_payload
.payload_len
;
532 rspsize
= job
->reply_payload
.payload_len
;
533 rspiocbq
= lpfc_sli_get_iocbq(phba
);
540 rsp
= &rspiocbq
->iocb
;
543 cmdiocbq
= lpfc_prep_els_iocb(vport
, 1, cmdsize
, 0, ndlp
,
544 ndlp
->nlp_DID
, elscmd
);
550 /* prep els iocb set context1 to the ndlp, context2 to the command
551 * dmabuf, context3 holds the data dmabuf
553 pcmd
= (struct lpfc_dmabuf
*) cmdiocbq
->context2
;
554 prsp
= (struct lpfc_dmabuf
*) pcmd
->list
.next
;
555 lpfc_mbuf_free(phba
, pcmd
->virt
, pcmd
->phys
);
557 lpfc_mbuf_free(phba
, prsp
->virt
, prsp
->phys
);
559 cmdiocbq
->context2
= NULL
;
561 pbuflist
= (struct lpfc_dmabuf
*) cmdiocbq
->context3
;
562 bpl
= (struct ulp_bde64
*) pbuflist
->virt
;
564 request_nseg
= pci_map_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
565 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
566 for_each_sg(job
->request_payload
.sg_list
, sgel
, request_nseg
, numbde
) {
567 busaddr
= sg_dma_address(sgel
);
568 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
569 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
570 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
571 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
572 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
576 reply_nseg
= pci_map_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
577 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
578 for_each_sg(job
->reply_payload
.sg_list
, sgel
, reply_nseg
, numbde
) {
579 busaddr
= sg_dma_address(sgel
);
580 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
581 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
582 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
583 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
584 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
587 cmdiocbq
->iocb
.un
.elsreq64
.bdl
.bdeSize
=
588 (request_nseg
+ reply_nseg
) * sizeof(struct ulp_bde64
);
589 cmdiocbq
->iocb
.ulpContext
= rpi
;
590 cmdiocbq
->iocb_flag
|= LPFC_IO_LIBDFC
;
591 cmdiocbq
->context1
= NULL
;
592 cmdiocbq
->context2
= NULL
;
594 cmdiocbq
->iocb_cmpl
= lpfc_bsg_rport_els_cmp
;
595 cmdiocbq
->context1
= dd_data
;
596 cmdiocbq
->context2
= rspiocbq
;
597 dd_data
->type
= TYPE_IOCB
;
598 dd_data
->context_un
.iocb
.cmdiocbq
= cmdiocbq
;
599 dd_data
->context_un
.iocb
.rspiocbq
= rspiocbq
;
600 dd_data
->context_un
.iocb
.set_job
= job
;
601 dd_data
->context_un
.iocb
.bmp
= NULL
;;
602 dd_data
->context_un
.iocb
.ndlp
= ndlp
;
604 if (phba
->cfg_poll
& DISABLE_FCP_RING_INT
) {
605 if (lpfc_readl(phba
->HCregaddr
, &creg_val
)) {
609 creg_val
|= (HC_R0INT_ENA
<< LPFC_FCP_RING
);
610 writel(creg_val
, phba
->HCregaddr
);
611 readl(phba
->HCregaddr
); /* flush */
613 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, cmdiocbq
, 0);
615 if (rc
== IOCB_SUCCESS
)
616 return 0; /* done for now */
617 else if (rc
== IOCB_BUSY
)
623 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
624 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
625 pci_unmap_sg(phba
->pcidev
, job
->reply_payload
.sg_list
,
626 job
->reply_payload
.sg_cnt
, DMA_FROM_DEVICE
);
628 lpfc_mbuf_free(phba
, pbuflist
->virt
, pbuflist
->phys
);
630 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
633 lpfc_sli_release_iocbq(phba
, rspiocbq
);
639 /* make error code available to userspace */
640 job
->reply
->result
= rc
;
/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
        struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
                                                  kref);
        struct event_data *ed;

        list_del(&evt->node);

        while (!list_empty(&evt->events_to_get)) {
                ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
                list_del(&ed->node);
                kfree(ed->data);
                kfree(ed);
        }

        while (!list_empty(&evt->events_to_see)) {
                ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
                list_del(&ed->node);
                kfree(ed->data);
                kfree(ed);
        }

        kfree(evt);
}
/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
        kref_get(&evt->kref);
}
/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
        kref_put(&evt->kref, lpfc_bsg_event_free);
}
/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
        struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

        if (!evt)
                return NULL;

        INIT_LIST_HEAD(&evt->events_to_get);
        INIT_LIST_HEAD(&evt->events_to_see);
        evt->type_mask = ev_mask;
        evt->req_id = ev_req_id;
        evt->reg_id = ev_reg_id;
        evt->wait_time_stamp = jiffies;
        init_waitqueue_head(&evt->wq);
        kref_init(&evt->kref);

        return evt;
}
/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
730 diag_cmd_data_free(struct lpfc_hba
*phba
, struct lpfc_dmabufext
*mlist
)
732 struct lpfc_dmabufext
*mlast
;
733 struct pci_dev
*pcidev
;
734 struct list_head head
, *curr
, *next
;
736 if ((!mlist
) || (!lpfc_is_link_up(phba
) &&
737 (phba
->link_flag
& LS_LOOPBACK_MODE
))) {
741 pcidev
= phba
->pcidev
;
742 list_add_tail(&head
, &mlist
->dma
.list
);
744 list_for_each_safe(curr
, next
, &head
) {
745 mlast
= list_entry(curr
, struct lpfc_dmabufext
, dma
.list
);
747 dma_free_coherent(&pcidev
->dev
,
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
766 lpfc_bsg_ct_unsol_event(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
767 struct lpfc_iocbq
*piocbq
)
769 uint32_t evt_req_id
= 0;
772 struct lpfc_dmabuf
*dmabuf
= NULL
;
773 struct lpfc_bsg_event
*evt
;
774 struct event_data
*evt_dat
= NULL
;
775 struct lpfc_iocbq
*iocbq
;
777 struct list_head head
;
778 struct ulp_bde64
*bde
;
781 struct lpfc_dmabuf
*bdeBuf1
= piocbq
->context2
;
782 struct lpfc_dmabuf
*bdeBuf2
= piocbq
->context3
;
783 struct lpfc_hbq_entry
*hbqe
;
784 struct lpfc_sli_ct_request
*ct_req
;
785 struct fc_bsg_job
*job
= NULL
;
789 INIT_LIST_HEAD(&head
);
790 list_add_tail(&head
, &piocbq
->list
);
792 if (piocbq
->iocb
.ulpBdeCount
== 0 ||
793 piocbq
->iocb
.un
.cont64
[0].tus
.f
.bdeSize
== 0)
794 goto error_ct_unsol_exit
;
796 if (phba
->link_state
== LPFC_HBA_ERROR
||
797 (!(phba
->sli
.sli_flag
& LPFC_SLI_ACTIVE
)))
798 goto error_ct_unsol_exit
;
800 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
)
803 dma_addr
= getPaddr(piocbq
->iocb
.un
.cont64
[0].addrHigh
,
804 piocbq
->iocb
.un
.cont64
[0].addrLow
);
805 dmabuf
= lpfc_sli_ringpostbuf_get(phba
, pring
, dma_addr
);
808 goto error_ct_unsol_exit
;
809 ct_req
= (struct lpfc_sli_ct_request
*)dmabuf
->virt
;
810 evt_req_id
= ct_req
->FsType
;
811 cmd
= ct_req
->CommandResponse
.bits
.CmdRsp
;
812 len
= ct_req
->CommandResponse
.bits
.Size
;
813 if (!(phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
))
814 lpfc_sli_ringpostbuf_put(phba
, pring
, dmabuf
);
816 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
817 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
818 if (!(evt
->type_mask
& FC_REG_CT_EVENT
) ||
819 evt
->req_id
!= evt_req_id
)
822 lpfc_bsg_event_ref(evt
);
823 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
824 evt_dat
= kzalloc(sizeof(*evt_dat
), GFP_KERNEL
);
825 if (evt_dat
== NULL
) {
826 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
827 lpfc_bsg_event_unref(evt
);
828 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
829 "2614 Memory allocation failed for "
834 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
835 /* take accumulated byte count from the last iocbq */
836 iocbq
= list_entry(head
.prev
, typeof(*iocbq
), list
);
837 evt_dat
->len
= iocbq
->iocb
.unsli3
.rcvsli3
.acc_len
;
839 list_for_each_entry(iocbq
, &head
, list
) {
840 for (i
= 0; i
< iocbq
->iocb
.ulpBdeCount
; i
++)
842 iocbq
->iocb
.un
.cont64
[i
].tus
.f
.bdeSize
;
846 evt_dat
->data
= kzalloc(evt_dat
->len
, GFP_KERNEL
);
847 if (evt_dat
->data
== NULL
) {
848 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
849 "2615 Memory allocation failed for "
850 "CT event data, size %d\n",
853 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
854 lpfc_bsg_event_unref(evt
);
855 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
856 goto error_ct_unsol_exit
;
859 list_for_each_entry(iocbq
, &head
, list
) {
861 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
862 bdeBuf1
= iocbq
->context2
;
863 bdeBuf2
= iocbq
->context3
;
865 for (i
= 0; i
< iocbq
->iocb
.ulpBdeCount
; i
++) {
866 if (phba
->sli3_options
&
867 LPFC_SLI3_HBQ_ENABLED
) {
869 hbqe
= (struct lpfc_hbq_entry
*)
870 &iocbq
->iocb
.un
.ulpWord
[0];
871 size
= hbqe
->bde
.tus
.f
.bdeSize
;
874 hbqe
= (struct lpfc_hbq_entry
*)
877 size
= hbqe
->bde
.tus
.f
.bdeSize
;
880 if ((offset
+ size
) > evt_dat
->len
)
881 size
= evt_dat
->len
- offset
;
883 size
= iocbq
->iocb
.un
.cont64
[i
].
885 bde
= &iocbq
->iocb
.un
.cont64
[i
];
886 dma_addr
= getPaddr(bde
->addrHigh
,
888 dmabuf
= lpfc_sli_ringpostbuf_get(phba
,
892 lpfc_printf_log(phba
, KERN_ERR
,
893 LOG_LIBDFC
, "2616 No dmabuf "
894 "found for iocbq 0x%p\n",
896 kfree(evt_dat
->data
);
898 spin_lock_irqsave(&phba
->ct_ev_lock
,
900 lpfc_bsg_event_unref(evt
);
901 spin_unlock_irqrestore(
902 &phba
->ct_ev_lock
, flags
);
903 goto error_ct_unsol_exit
;
905 memcpy((char *)(evt_dat
->data
) + offset
,
908 if (evt_req_id
!= SLI_CT_ELX_LOOPBACK
&&
909 !(phba
->sli3_options
&
910 LPFC_SLI3_HBQ_ENABLED
)) {
911 lpfc_sli_ringpostbuf_put(phba
, pring
,
915 case ELX_LOOPBACK_DATA
:
916 diag_cmd_data_free(phba
,
917 (struct lpfc_dmabufext
*)
920 case ELX_LOOPBACK_XRI_SETUP
:
921 if ((phba
->sli_rev
==
923 (phba
->sli3_options
&
924 LPFC_SLI3_HBQ_ENABLED
926 lpfc_in_buf_free(phba
,
929 lpfc_post_buffer(phba
,
935 if (!(phba
->sli3_options
&
936 LPFC_SLI3_HBQ_ENABLED
))
937 lpfc_post_buffer(phba
,
946 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
947 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
948 evt_dat
->immed_dat
= phba
->ctx_idx
;
949 phba
->ctx_idx
= (phba
->ctx_idx
+ 1) % 64;
950 /* Provide warning for over-run of the ct_ctx array */
951 if (phba
->ct_ctx
[evt_dat
->immed_dat
].flags
&
953 lpfc_printf_log(phba
, KERN_WARNING
, LOG_ELS
,
954 "2717 CT context array entry "
955 "[%d] over-run: oxid:x%x, "
956 "sid:x%x\n", phba
->ctx_idx
,
958 evt_dat
->immed_dat
].oxid
,
960 evt_dat
->immed_dat
].SID
);
961 phba
->ct_ctx
[evt_dat
->immed_dat
].oxid
=
962 piocbq
->iocb
.ulpContext
;
963 phba
->ct_ctx
[evt_dat
->immed_dat
].SID
=
964 piocbq
->iocb
.un
.rcvels
.remoteID
;
965 phba
->ct_ctx
[evt_dat
->immed_dat
].flags
= UNSOL_VALID
;
967 evt_dat
->immed_dat
= piocbq
->iocb
.ulpContext
;
969 evt_dat
->type
= FC_REG_CT_EVENT
;
970 list_add(&evt_dat
->node
, &evt
->events_to_see
);
971 if (evt_req_id
== SLI_CT_ELX_LOOPBACK
) {
972 wake_up_interruptible(&evt
->wq
);
973 lpfc_bsg_event_unref(evt
);
977 list_move(evt
->events_to_see
.prev
, &evt
->events_to_get
);
978 lpfc_bsg_event_unref(evt
);
983 job
->reply
->reply_payload_rcv_len
= size
;
984 /* make error code available to userspace */
985 job
->reply
->result
= 0;
987 /* complete the job back to userspace */
988 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
990 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
993 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
996 if (!list_empty(&head
))
998 if (evt_req_id
== SLI_CT_ELX_LOOPBACK
)
/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
1008 lpfc_bsg_hba_set_event(struct fc_bsg_job
*job
)
1010 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
1011 struct lpfc_hba
*phba
= vport
->phba
;
1012 struct set_ct_event
*event_req
;
1013 struct lpfc_bsg_event
*evt
;
1015 struct bsg_job_data
*dd_data
= NULL
;
1017 unsigned long flags
;
1019 if (job
->request_len
<
1020 sizeof(struct fc_bsg_request
) + sizeof(struct set_ct_event
)) {
1021 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1022 "2612 Received SET_CT_EVENT below minimum "
1028 dd_data
= kmalloc(sizeof(struct bsg_job_data
), GFP_KERNEL
);
1029 if (dd_data
== NULL
) {
1030 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1031 "2734 Failed allocation of dd_data\n");
1036 event_req
= (struct set_ct_event
*)
1037 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
1038 ev_mask
= ((uint32_t)(unsigned long)event_req
->type_mask
&
1040 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1041 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
1042 if (evt
->reg_id
== event_req
->ev_reg_id
) {
1043 lpfc_bsg_event_ref(evt
);
1044 evt
->wait_time_stamp
= jiffies
;
1048 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1050 if (&evt
->node
== &phba
->ct_ev_waiters
) {
1051 /* no event waiting struct yet - first call */
1052 evt
= lpfc_bsg_event_new(ev_mask
, event_req
->ev_reg_id
,
1053 event_req
->ev_req_id
);
1055 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1056 "2617 Failed allocation of event "
1062 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1063 list_add(&evt
->node
, &phba
->ct_ev_waiters
);
1064 lpfc_bsg_event_ref(evt
);
1065 evt
->wait_time_stamp
= jiffies
;
1066 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1069 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1071 dd_data
->type
= TYPE_EVT
;
1072 dd_data
->context_un
.evt
= evt
;
1073 evt
->set_job
= job
; /* for unsolicited command */
1074 job
->dd_data
= dd_data
; /* for fc transport timeout callback*/
1075 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1076 return 0; /* call job done later */
1079 if (dd_data
!= NULL
)
1082 job
->dd_data
= NULL
;
/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
1091 lpfc_bsg_hba_get_event(struct fc_bsg_job
*job
)
1093 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
1094 struct lpfc_hba
*phba
= vport
->phba
;
1095 struct get_ct_event
*event_req
;
1096 struct get_ct_event_reply
*event_reply
;
1097 struct lpfc_bsg_event
*evt
;
1098 struct event_data
*evt_dat
= NULL
;
1099 unsigned long flags
;
1102 if (job
->request_len
<
1103 sizeof(struct fc_bsg_request
) + sizeof(struct get_ct_event
)) {
1104 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1105 "2613 Received GET_CT_EVENT request below "
1111 event_req
= (struct get_ct_event
*)
1112 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
1114 event_reply
= (struct get_ct_event_reply
*)
1115 job
->reply
->reply_data
.vendor_reply
.vendor_rsp
;
1116 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1117 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
1118 if (evt
->reg_id
== event_req
->ev_reg_id
) {
1119 if (list_empty(&evt
->events_to_get
))
1121 lpfc_bsg_event_ref(evt
);
1122 evt
->wait_time_stamp
= jiffies
;
1123 evt_dat
= list_entry(evt
->events_to_get
.prev
,
1124 struct event_data
, node
);
1125 list_del(&evt_dat
->node
);
1129 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1131 /* The app may continue to ask for event data until it gets
1132 * an error indicating that there isn't anymore
1134 if (evt_dat
== NULL
) {
1135 job
->reply
->reply_payload_rcv_len
= 0;
1140 if (evt_dat
->len
> job
->request_payload
.payload_len
) {
1141 evt_dat
->len
= job
->request_payload
.payload_len
;
1142 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1143 "2618 Truncated event data at %d "
1145 job
->request_payload
.payload_len
);
1148 event_reply
->type
= evt_dat
->type
;
1149 event_reply
->immed_data
= evt_dat
->immed_dat
;
1150 if (evt_dat
->len
> 0)
1151 job
->reply
->reply_payload_rcv_len
=
1152 sg_copy_from_buffer(job
->request_payload
.sg_list
,
1153 job
->request_payload
.sg_cnt
,
1154 evt_dat
->data
, evt_dat
->len
);
1156 job
->reply
->reply_payload_rcv_len
= 0;
1159 kfree(evt_dat
->data
);
1163 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1164 lpfc_bsg_event_unref(evt
);
1165 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1166 job
->dd_data
= NULL
;
1167 job
->reply
->result
= 0;
1172 job
->dd_data
= NULL
;
1173 job
->reply
->result
= rc
;
/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function can also be called from another thread
 * which cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
1195 lpfc_issue_ct_rsp_cmp(struct lpfc_hba
*phba
,
1196 struct lpfc_iocbq
*cmdiocbq
,
1197 struct lpfc_iocbq
*rspiocbq
)
1199 struct bsg_job_data
*dd_data
;
1200 struct fc_bsg_job
*job
;
1202 struct lpfc_dmabuf
*bmp
;
1203 struct lpfc_nodelist
*ndlp
;
1204 unsigned long flags
;
1207 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1208 dd_data
= cmdiocbq
->context2
;
1209 /* normal completion and timeout crossed paths, already done */
1211 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1215 job
= dd_data
->context_un
.iocb
.set_job
;
1216 bmp
= dd_data
->context_un
.iocb
.bmp
;
1217 rsp
= &rspiocbq
->iocb
;
1218 ndlp
= dd_data
->context_un
.iocb
.ndlp
;
1220 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
1221 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1223 if (rsp
->ulpStatus
) {
1224 if (rsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
) {
1225 switch (rsp
->un
.ulpWord
[4] & 0xff) {
1226 case IOERR_SEQUENCE_TIMEOUT
:
1229 case IOERR_INVALID_RPI
:
1239 job
->reply
->reply_payload_rcv_len
=
1240 rsp
->un
.genreq64
.bdl
.bdeSize
;
1242 lpfc_mbuf_free(phba
, bmp
->virt
, bmp
->phys
);
1243 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
1247 /* make error code available to userspace */
1248 job
->reply
->result
= rc
;
1249 job
->dd_data
= NULL
;
1250 /* complete the job back to userspace */
1252 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the port's context exchange array.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
1265 lpfc_issue_ct_rsp(struct lpfc_hba
*phba
, struct fc_bsg_job
*job
, uint32_t tag
,
1266 struct lpfc_dmabuf
*bmp
, int num_entry
)
1269 struct lpfc_iocbq
*ctiocb
= NULL
;
1271 struct lpfc_nodelist
*ndlp
= NULL
;
1272 struct bsg_job_data
*dd_data
;
1275 /* allocate our bsg tracking structure */
1276 dd_data
= kmalloc(sizeof(struct bsg_job_data
), GFP_KERNEL
);
1278 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1279 "2736 Failed allocation of dd_data\n");
1284 /* Allocate buffer for command iocb */
1285 ctiocb
= lpfc_sli_get_iocbq(phba
);
1291 icmd
= &ctiocb
->iocb
;
1292 icmd
->un
.xseq64
.bdl
.ulpIoTag32
= 0;
1293 icmd
->un
.xseq64
.bdl
.addrHigh
= putPaddrHigh(bmp
->phys
);
1294 icmd
->un
.xseq64
.bdl
.addrLow
= putPaddrLow(bmp
->phys
);
1295 icmd
->un
.xseq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
1296 icmd
->un
.xseq64
.bdl
.bdeSize
= (num_entry
* sizeof(struct ulp_bde64
));
1297 icmd
->un
.xseq64
.w5
.hcsw
.Fctl
= (LS
| LA
);
1298 icmd
->un
.xseq64
.w5
.hcsw
.Dfctl
= 0;
1299 icmd
->un
.xseq64
.w5
.hcsw
.Rctl
= FC_RCTL_DD_SOL_CTL
;
1300 icmd
->un
.xseq64
.w5
.hcsw
.Type
= FC_TYPE_CT
;
1302 /* Fill in rest of iocb */
1303 icmd
->ulpCommand
= CMD_XMIT_SEQUENCE64_CX
;
1304 icmd
->ulpBdeCount
= 1;
1306 icmd
->ulpClass
= CLASS3
;
1307 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
1308 /* Do not issue unsol response if oxid not marked as valid */
1309 if (!(phba
->ct_ctx
[tag
].flags
& UNSOL_VALID
)) {
1311 goto issue_ct_rsp_exit
;
1313 icmd
->ulpContext
= phba
->ct_ctx
[tag
].oxid
;
1314 ndlp
= lpfc_findnode_did(phba
->pport
, phba
->ct_ctx
[tag
].SID
);
1316 lpfc_printf_log(phba
, KERN_WARNING
, LOG_ELS
,
1317 "2721 ndlp null for oxid %x SID %x\n",
1319 phba
->ct_ctx
[tag
].SID
);
1321 goto issue_ct_rsp_exit
;
1324 /* Check if the ndlp is active */
1325 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
)) {
1327 goto issue_ct_rsp_exit
;
1330 /* get a refernece count so the ndlp doesn't go away while
1333 if (!lpfc_nlp_get(ndlp
)) {
1335 goto issue_ct_rsp_exit
;
1338 icmd
->un
.ulpWord
[3] = ndlp
->nlp_rpi
;
1339 /* The exchange is done, mark the entry as invalid */
1340 phba
->ct_ctx
[tag
].flags
&= ~UNSOL_VALID
;
1342 icmd
->ulpContext
= (ushort
) tag
;
1344 icmd
->ulpTimeout
= phba
->fc_ratov
* 2;
1346 /* Xmit CT response on exchange <xid> */
1347 lpfc_printf_log(phba
, KERN_INFO
, LOG_ELS
,
1348 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
1349 icmd
->ulpContext
, icmd
->ulpIoTag
, phba
->link_state
);
1351 ctiocb
->iocb_cmpl
= NULL
;
1352 ctiocb
->iocb_flag
|= LPFC_IO_LIBDFC
;
1353 ctiocb
->vport
= phba
->pport
;
1354 ctiocb
->context3
= bmp
;
1356 ctiocb
->iocb_cmpl
= lpfc_issue_ct_rsp_cmp
;
1357 ctiocb
->context2
= dd_data
;
1358 ctiocb
->context1
= ndlp
;
1359 dd_data
->type
= TYPE_IOCB
;
1360 dd_data
->context_un
.iocb
.cmdiocbq
= ctiocb
;
1361 dd_data
->context_un
.iocb
.rspiocbq
= NULL
;
1362 dd_data
->context_un
.iocb
.set_job
= job
;
1363 dd_data
->context_un
.iocb
.bmp
= bmp
;
1364 dd_data
->context_un
.iocb
.ndlp
= ndlp
;
1366 if (phba
->cfg_poll
& DISABLE_FCP_RING_INT
) {
1367 if (lpfc_readl(phba
->HCregaddr
, &creg_val
)) {
1369 goto issue_ct_rsp_exit
;
1371 creg_val
|= (HC_R0INT_ENA
<< LPFC_FCP_RING
);
1372 writel(creg_val
, phba
->HCregaddr
);
1373 readl(phba
->HCregaddr
); /* flush */
1376 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, ctiocb
, 0);
1378 if (rc
== IOCB_SUCCESS
)
1379 return 0; /* done for now */
1382 lpfc_sli_release_iocbq(phba
, ctiocb
);
/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
1394 lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job
*job
)
1396 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
1397 struct lpfc_hba
*phba
= vport
->phba
;
1398 struct send_mgmt_resp
*mgmt_resp
= (struct send_mgmt_resp
*)
1399 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
1400 struct ulp_bde64
*bpl
;
1401 struct lpfc_dmabuf
*bmp
= NULL
;
1402 struct scatterlist
*sgel
= NULL
;
1406 uint32_t tag
= mgmt_resp
->tag
;
1407 unsigned long reqbfrcnt
=
1408 (unsigned long)job
->request_payload
.payload_len
;
1411 /* in case no data is transferred */
1412 job
->reply
->reply_payload_rcv_len
= 0;
1414 if (!reqbfrcnt
|| (reqbfrcnt
> (80 * BUF_SZ_4K
))) {
1416 goto send_mgmt_rsp_exit
;
1419 bmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1422 goto send_mgmt_rsp_exit
;
1425 bmp
->virt
= lpfc_mbuf_alloc(phba
, 0, &bmp
->phys
);
1428 goto send_mgmt_rsp_free_bmp
;
1431 INIT_LIST_HEAD(&bmp
->list
);
1432 bpl
= (struct ulp_bde64
*) bmp
->virt
;
1433 request_nseg
= pci_map_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
1434 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1435 for_each_sg(job
->request_payload
.sg_list
, sgel
, request_nseg
, numbde
) {
1436 busaddr
= sg_dma_address(sgel
);
1437 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
1438 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
1439 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
1440 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
1441 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
1445 rc
= lpfc_issue_ct_rsp(phba
, job
, tag
, bmp
, request_nseg
);
1447 if (rc
== IOCB_SUCCESS
)
1448 return 0; /* done for now */
1450 /* TBD need to handle a timeout */
1451 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
1452 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1454 lpfc_mbuf_free(phba
, bmp
->virt
, bmp
->phys
);
1456 send_mgmt_rsp_free_bmp
:
1459 /* make error code available to userspace */
1460 job
->reply
->result
= rc
;
1461 job
->dd_data
= NULL
;
/**
 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing a port into diagnostic loopback
 * mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked; a small delay is used to allow the
 * outstanding scsi requests to complete, then the link is brought down.
 * Once the link is placed in loopback mode, scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 **/
1478 lpfc_bsg_diag_mode(struct fc_bsg_job
*job
)
1480 struct Scsi_Host
*shost
= job
->shost
;
1481 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
1482 struct lpfc_hba
*phba
= vport
->phba
;
1483 struct diag_mode_set
*loopback_mode
;
1484 struct lpfc_sli
*psli
= &phba
->sli
;
1485 struct lpfc_sli_ring
*pring
= &psli
->ring
[LPFC_FCP_RING
];
1486 uint32_t link_flags
;
1488 struct lpfc_vport
**vports
;
1489 LPFC_MBOXQ_t
*pmboxq
;
1494 /* no data to return just the return code */
1495 job
->reply
->reply_payload_rcv_len
= 0;
1497 if (job
->request_len
<
1498 sizeof(struct fc_bsg_request
) + sizeof(struct diag_mode_set
)) {
1499 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1500 "2738 Received DIAG MODE request below minimum "
1506 loopback_mode
= (struct diag_mode_set
*)
1507 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
1508 link_flags
= loopback_mode
->type
;
1509 timeout
= loopback_mode
->timeout
* 100;
1511 if ((phba
->link_state
== LPFC_HBA_ERROR
) ||
1512 (psli
->sli_flag
& LPFC_BLOCK_MGMT_IO
) ||
1513 (!(psli
->sli_flag
& LPFC_SLI_ACTIVE
))) {
1518 pmboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1524 vports
= lpfc_create_vport_work_array(phba
);
1526 for (i
= 0; i
<= phba
->max_vpi
&& vports
[i
] != NULL
; i
++) {
1527 shost
= lpfc_shost_from_vport(vports
[i
]);
1528 scsi_block_requests(shost
);
1531 lpfc_destroy_vport_work_array(phba
, vports
);
1533 shost
= lpfc_shost_from_vport(phba
->pport
);
1534 scsi_block_requests(shost
);
1537 while (pring
->txcmplq_cnt
) {
1538 if (i
++ > 500) /* wait up to 5 seconds */
1544 memset((void *)pmboxq
, 0, sizeof(LPFC_MBOXQ_t
));
1545 pmboxq
->u
.mb
.mbxCommand
= MBX_DOWN_LINK
;
1546 pmboxq
->u
.mb
.mbxOwner
= OWN_HOST
;
1548 mbxstatus
= lpfc_sli_issue_mbox_wait(phba
, pmboxq
, LPFC_MBOX_TMO
);
1550 if ((mbxstatus
== MBX_SUCCESS
) && (pmboxq
->u
.mb
.mbxStatus
== 0)) {
1551 /* wait for link down before proceeding */
1553 while (phba
->link_state
!= LPFC_LINK_DOWN
) {
1554 if (i
++ > timeout
) {
1556 goto loopback_mode_exit
;
1562 memset((void *)pmboxq
, 0, sizeof(LPFC_MBOXQ_t
));
1563 if (link_flags
== INTERNAL_LOOP_BACK
)
1564 pmboxq
->u
.mb
.un
.varInitLnk
.link_flags
= FLAGS_LOCAL_LB
;
1566 pmboxq
->u
.mb
.un
.varInitLnk
.link_flags
=
1567 FLAGS_TOPOLOGY_MODE_LOOP
;
1569 pmboxq
->u
.mb
.mbxCommand
= MBX_INIT_LINK
;
1570 pmboxq
->u
.mb
.mbxOwner
= OWN_HOST
;
1572 mbxstatus
= lpfc_sli_issue_mbox_wait(phba
, pmboxq
,
1575 if ((mbxstatus
!= MBX_SUCCESS
) || (pmboxq
->u
.mb
.mbxStatus
))
1578 phba
->link_flag
|= LS_LOOPBACK_MODE
;
1579 /* wait for the link attention interrupt */
1583 while (phba
->link_state
!= LPFC_HBA_READY
) {
1584 if (i
++ > timeout
) {
1597 vports
= lpfc_create_vport_work_array(phba
);
1599 for (i
= 0; i
<= phba
->max_vpi
&& vports
[i
] != NULL
; i
++) {
1600 shost
= lpfc_shost_from_vport(vports
[i
]);
1601 scsi_unblock_requests(shost
);
1603 lpfc_destroy_vport_work_array(phba
, vports
);
1605 shost
= lpfc_shost_from_vport(phba
->pport
);
1606 scsi_unblock_requests(shost
);
1610 * Let SLI layer release mboxq if mbox command completed after timeout.
1612 if (mbxstatus
!= MBX_TIMEOUT
)
1613 mempool_free(pmboxq
, phba
->mbox_mem_pool
);
1616 /* make error code available to userspace */
1617 job
->reply
->result
= rc
;
1618 /* complete the job back to userspace if no error */
/**
 * lpfcdiag_loop_self_reg - obtains a remote port login id
 * @phba: Pointer to HBA context object
 * @rpi: Pointer to a remote port login id
 *
 * This function obtains a remote port login id so the diag loopback test
 * can send and receive its own unsolicited CT command.
 **/
1632 static int lpfcdiag_loop_self_reg(struct lpfc_hba
*phba
, uint16_t *rpi
)
1635 struct lpfc_dmabuf
*dmabuff
;
1638 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1642 if (phba
->sli_rev
== LPFC_SLI_REV4
)
1643 *rpi
= lpfc_sli4_alloc_rpi(phba
);
1644 status
= lpfc_reg_rpi(phba
, 0, phba
->pport
->fc_myDID
,
1645 (uint8_t *)&phba
->pport
->fc_sparam
, mbox
, *rpi
);
1647 mempool_free(mbox
, phba
->mbox_mem_pool
);
1648 if (phba
->sli_rev
== LPFC_SLI_REV4
)
1649 lpfc_sli4_free_rpi(phba
, *rpi
);
1653 dmabuff
= (struct lpfc_dmabuf
*) mbox
->context1
;
1654 mbox
->context1
= NULL
;
1655 mbox
->context2
= NULL
;
1656 status
= lpfc_sli_issue_mbox_wait(phba
, mbox
, LPFC_MBOX_TMO
);
1658 if ((status
!= MBX_SUCCESS
) || (mbox
->u
.mb
.mbxStatus
)) {
1659 lpfc_mbuf_free(phba
, dmabuff
->virt
, dmabuff
->phys
);
1661 if (status
!= MBX_TIMEOUT
)
1662 mempool_free(mbox
, phba
->mbox_mem_pool
);
1663 if (phba
->sli_rev
== LPFC_SLI_REV4
)
1664 lpfc_sli4_free_rpi(phba
, *rpi
);
1668 *rpi
= mbox
->u
.mb
.un
.varWords
[0];
1670 lpfc_mbuf_free(phba
, dmabuff
->virt
, dmabuff
->phys
);
1672 mempool_free(mbox
, phba
->mbox_mem_pool
);
/**
 * lpfcdiag_loop_self_unreg - unregs from the rpi
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 *
 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
 **/
1683 static int lpfcdiag_loop_self_unreg(struct lpfc_hba
*phba
, uint16_t rpi
)
1688 /* Allocate mboxq structure */
1689 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1693 lpfc_unreg_login(phba
, 0, rpi
, mbox
);
1694 status
= lpfc_sli_issue_mbox_wait(phba
, mbox
, LPFC_MBOX_TMO
);
1696 if ((status
!= MBX_SUCCESS
) || (mbox
->u
.mb
.mbxStatus
)) {
1697 if (status
!= MBX_TIMEOUT
)
1698 mempool_free(mbox
, phba
->mbox_mem_pool
);
1701 mempool_free(mbox
, phba
->mbox_mem_pool
);
1702 if (phba
->sli_rev
== LPFC_SLI_REV4
)
1703 lpfc_sli4_free_rpi(phba
, rpi
);
/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * are used so that the unsolicited response handler is able to process
 * the ct command sent on the same port.
 **/
1719 static int lpfcdiag_loop_get_xri(struct lpfc_hba
*phba
, uint16_t rpi
,
1720 uint16_t *txxri
, uint16_t * rxxri
)
1722 struct lpfc_bsg_event
*evt
;
1723 struct lpfc_iocbq
*cmdiocbq
, *rspiocbq
;
1725 struct lpfc_dmabuf
*dmabuf
;
1726 struct ulp_bde64
*bpl
= NULL
;
1727 struct lpfc_sli_ct_request
*ctreq
= NULL
;
1731 unsigned long flags
;
1735 evt
= lpfc_bsg_event_new(FC_REG_CT_EVENT
, current
->pid
,
1736 SLI_CT_ELX_LOOPBACK
);
1740 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1741 list_add(&evt
->node
, &phba
->ct_ev_waiters
);
1742 lpfc_bsg_event_ref(evt
);
1743 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1745 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
1746 rspiocbq
= lpfc_sli_get_iocbq(phba
);
1748 dmabuf
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1750 dmabuf
->virt
= lpfc_mbuf_alloc(phba
, 0, &dmabuf
->phys
);
1752 INIT_LIST_HEAD(&dmabuf
->list
);
1753 bpl
= (struct ulp_bde64
*) dmabuf
->virt
;
1754 memset(bpl
, 0, sizeof(*bpl
));
1755 ctreq
= (struct lpfc_sli_ct_request
*)(bpl
+ 1);
1757 le32_to_cpu(putPaddrHigh(dmabuf
->phys
+
1760 le32_to_cpu(putPaddrLow(dmabuf
->phys
+
1762 bpl
->tus
.f
.bdeFlags
= 0;
1763 bpl
->tus
.f
.bdeSize
= ELX_LOOPBACK_HEADER_SZ
;
1764 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1768 if (cmdiocbq
== NULL
|| rspiocbq
== NULL
||
1769 dmabuf
== NULL
|| bpl
== NULL
|| ctreq
== NULL
||
1770 dmabuf
->virt
== NULL
) {
1772 goto err_get_xri_exit
;
1775 cmd
= &cmdiocbq
->iocb
;
1776 rsp
= &rspiocbq
->iocb
;
1778 memset(ctreq
, 0, ELX_LOOPBACK_HEADER_SZ
);
1780 ctreq
->RevisionId
.bits
.Revision
= SLI_CT_REVISION
;
1781 ctreq
->RevisionId
.bits
.InId
= 0;
1782 ctreq
->FsType
= SLI_CT_ELX_LOOPBACK
;
1783 ctreq
->FsSubType
= 0;
1784 ctreq
->CommandResponse
.bits
.CmdRsp
= ELX_LOOPBACK_XRI_SETUP
;
1785 ctreq
->CommandResponse
.bits
.Size
= 0;
1788 cmd
->un
.xseq64
.bdl
.addrHigh
= putPaddrHigh(dmabuf
->phys
);
1789 cmd
->un
.xseq64
.bdl
.addrLow
= putPaddrLow(dmabuf
->phys
);
1790 cmd
->un
.xseq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
1791 cmd
->un
.xseq64
.bdl
.bdeSize
= sizeof(*bpl
);
1793 cmd
->un
.xseq64
.w5
.hcsw
.Fctl
= LA
;
1794 cmd
->un
.xseq64
.w5
.hcsw
.Dfctl
= 0;
1795 cmd
->un
.xseq64
.w5
.hcsw
.Rctl
= FC_RCTL_DD_UNSOL_CTL
;
1796 cmd
->un
.xseq64
.w5
.hcsw
.Type
= FC_TYPE_CT
;
1798 cmd
->ulpCommand
= CMD_XMIT_SEQUENCE64_CR
;
1799 cmd
->ulpBdeCount
= 1;
1801 cmd
->ulpClass
= CLASS3
;
1802 cmd
->ulpContext
= rpi
;
1804 cmdiocbq
->iocb_flag
|= LPFC_IO_LIBDFC
;
1805 cmdiocbq
->vport
= phba
->pport
;
1807 iocb_stat
= lpfc_sli_issue_iocb_wait(phba
, LPFC_ELS_RING
, cmdiocbq
,
1809 (phba
->fc_ratov
* 2)
1810 + LPFC_DRVR_TIMEOUT
);
1813 goto err_get_xri_exit
;
1815 *txxri
= rsp
->ulpContext
;
1818 evt
->wait_time_stamp
= jiffies
;
1819 time_left
= wait_event_interruptible_timeout(
1820 evt
->wq
, !list_empty(&evt
->events_to_see
),
1821 ((phba
->fc_ratov
* 2) + LPFC_DRVR_TIMEOUT
) * HZ
);
1822 if (list_empty(&evt
->events_to_see
))
1823 ret_val
= (time_left
) ? -EINTR
: -ETIMEDOUT
;
1825 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1826 list_move(evt
->events_to_see
.prev
, &evt
->events_to_get
);
1827 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1828 *rxxri
= (list_entry(evt
->events_to_get
.prev
,
1829 typeof(struct event_data
),
1835 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1836 lpfc_bsg_event_unref(evt
); /* release ref */
1837 lpfc_bsg_event_unref(evt
); /* delete */
1838 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1842 lpfc_mbuf_free(phba
, dmabuf
->virt
, dmabuf
->phys
);
1846 if (cmdiocbq
&& (iocb_stat
!= IOCB_TIMEDOUT
))
1847 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
1849 lpfc_sli_release_iocbq(phba
, rspiocbq
);
/**
 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
 * @phba: Pointer to HBA context object
 * @bpl: Pointer to 64 bit bde structure
 * @size: Number of bytes to process
 * @nocopydata: Flag to copy user data into the allocated buffer
 *
 * This function allocates page size buffers and populates an lpfc_dmabufext.
 * If allowed the user data pointed to with indataptr is copied into the kernel
 * memory. The chained list of page size buffers is returned.
 **/
1864 static struct lpfc_dmabufext
*
1865 diag_cmd_data_alloc(struct lpfc_hba
*phba
,
1866 struct ulp_bde64
*bpl
, uint32_t size
,
1869 struct lpfc_dmabufext
*mlist
= NULL
;
1870 struct lpfc_dmabufext
*dmp
;
1871 int cnt
, offset
= 0, i
= 0;
1872 struct pci_dev
*pcidev
;
1874 pcidev
= phba
->pcidev
;
1877 /* We get chunks of 4K */
1878 if (size
> BUF_SZ_4K
)
1883 /* allocate struct lpfc_dmabufext buffer header */
1884 dmp
= kmalloc(sizeof(struct lpfc_dmabufext
), GFP_KERNEL
);
1888 INIT_LIST_HEAD(&dmp
->dma
.list
);
1890 /* Queue it to a linked list */
1892 list_add_tail(&dmp
->dma
.list
, &mlist
->dma
.list
);
1896 /* allocate buffer */
1897 dmp
->dma
.virt
= dma_alloc_coherent(&pcidev
->dev
,
1908 bpl
->tus
.f
.bdeFlags
= 0;
1909 pci_dma_sync_single_for_device(phba
->pcidev
,
1910 dmp
->dma
.phys
, LPFC_BPL_SIZE
, PCI_DMA_TODEVICE
);
1913 memset((uint8_t *)dmp
->dma
.virt
, 0, cnt
);
1914 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
1917 /* build buffer ptr list for IOCB */
1918 bpl
->addrLow
= le32_to_cpu(putPaddrLow(dmp
->dma
.phys
));
1919 bpl
->addrHigh
= le32_to_cpu(putPaddrHigh(dmp
->dma
.phys
));
1920 bpl
->tus
.f
.bdeSize
= (ushort
) cnt
;
1921 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1932 diag_cmd_data_free(phba
, mlist
);
/**
 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to
 * receive an unsolicited CT command.
 **/
1945 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba
*phba
, uint16_t rxxri
,
1948 struct lpfc_sli
*psli
= &phba
->sli
;
1949 struct lpfc_sli_ring
*pring
= &psli
->ring
[LPFC_ELS_RING
];
1950 struct lpfc_iocbq
*cmdiocbq
;
1952 struct list_head head
, *curr
, *next
;
1953 struct lpfc_dmabuf
*rxbmp
;
1954 struct lpfc_dmabuf
*dmp
;
1955 struct lpfc_dmabuf
*mp
[2] = {NULL
, NULL
};
1956 struct ulp_bde64
*rxbpl
= NULL
;
1958 struct lpfc_dmabufext
*rxbuffer
= NULL
;
1963 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
1964 rxbmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1965 if (rxbmp
!= NULL
) {
1966 rxbmp
->virt
= lpfc_mbuf_alloc(phba
, 0, &rxbmp
->phys
);
1968 INIT_LIST_HEAD(&rxbmp
->list
);
1969 rxbpl
= (struct ulp_bde64
*) rxbmp
->virt
;
1970 rxbuffer
= diag_cmd_data_alloc(phba
, rxbpl
, len
, 0);
1974 if (!cmdiocbq
|| !rxbmp
|| !rxbpl
|| !rxbuffer
) {
1976 goto err_post_rxbufs_exit
;
1979 /* Queue buffers for the receive exchange */
1980 num_bde
= (uint32_t)rxbuffer
->flag
;
1981 dmp
= &rxbuffer
->dma
;
1983 cmd
= &cmdiocbq
->iocb
;
1986 INIT_LIST_HEAD(&head
);
1987 list_add_tail(&head
, &dmp
->list
);
1988 list_for_each_safe(curr
, next
, &head
) {
1989 mp
[i
] = list_entry(curr
, struct lpfc_dmabuf
, list
);
1992 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
1993 mp
[i
]->buffer_tag
= lpfc_sli_get_buffer_tag(phba
);
1994 cmd
->un
.quexri64cx
.buff
.bde
.addrHigh
=
1995 putPaddrHigh(mp
[i
]->phys
);
1996 cmd
->un
.quexri64cx
.buff
.bde
.addrLow
=
1997 putPaddrLow(mp
[i
]->phys
);
1998 cmd
->un
.quexri64cx
.buff
.bde
.tus
.f
.bdeSize
=
1999 ((struct lpfc_dmabufext
*)mp
[i
])->size
;
2000 cmd
->un
.quexri64cx
.buff
.buffer_tag
= mp
[i
]->buffer_tag
;
2001 cmd
->ulpCommand
= CMD_QUE_XRI64_CX
;
2004 cmd
->ulpBdeCount
= 1;
2005 cmd
->unsli3
.que_xri64cx_ext_words
.ebde_count
= 0;
2008 cmd
->un
.cont64
[i
].addrHigh
= putPaddrHigh(mp
[i
]->phys
);
2009 cmd
->un
.cont64
[i
].addrLow
= putPaddrLow(mp
[i
]->phys
);
2010 cmd
->un
.cont64
[i
].tus
.f
.bdeSize
=
2011 ((struct lpfc_dmabufext
*)mp
[i
])->size
;
2012 cmd
->ulpBdeCount
= ++i
;
2014 if ((--num_bde
> 0) && (i
< 2))
2017 cmd
->ulpCommand
= CMD_QUE_XRI_BUF64_CX
;
2021 cmd
->ulpClass
= CLASS3
;
2022 cmd
->ulpContext
= rxxri
;
2024 iocb_stat
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, cmdiocbq
,
2026 if (iocb_stat
== IOCB_ERROR
) {
2027 diag_cmd_data_free(phba
,
2028 (struct lpfc_dmabufext
*)mp
[0]);
2030 diag_cmd_data_free(phba
,
2031 (struct lpfc_dmabufext
*)mp
[1]);
2032 dmp
= list_entry(next
, struct lpfc_dmabuf
, list
);
2034 goto err_post_rxbufs_exit
;
2037 lpfc_sli_ringpostbuf_put(phba
, pring
, mp
[0]);
2039 lpfc_sli_ringpostbuf_put(phba
, pring
, mp
[1]);
2043 /* The iocb was freed by lpfc_sli_issue_iocb */
2044 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
2046 dmp
= list_entry(next
, struct lpfc_dmabuf
, list
);
2048 goto err_post_rxbufs_exit
;
2051 cmd
= &cmdiocbq
->iocb
;
2056 err_post_rxbufs_exit
:
2060 lpfc_mbuf_free(phba
, rxbmp
->virt
, rxbmp
->phys
);
2065 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
/**
 * lpfc_bsg_diag_test - with a port in loopback issues a CT cmd to itself
 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
 *
 * This function receives a user data buffer to be transmitted and received on
 * the same port; the link must be up and in loopback mode prior to issuing
 * this command:
 * 1. A kernel buffer is allocated to copy the user data into.
 * 2. The port registers with "itself".
 * 3. The transmit and receive exchange ids are obtained.
 * 4. The receive exchange id is posted.
 * 5. A new els loopback event is created.
 * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
 *
 * This function is meant to be called n times while the port is in loopback
 * so it is the app's responsibility to issue a reset to take the port out
 * of loopback mode.
 **/
2089 lpfc_bsg_diag_test(struct fc_bsg_job
*job
)
2091 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
2092 struct lpfc_hba
*phba
= vport
->phba
;
2093 struct diag_mode_test
*diag_mode
;
2094 struct lpfc_bsg_event
*evt
;
2095 struct event_data
*evdat
;
2096 struct lpfc_sli
*psli
= &phba
->sli
;
2099 size_t segment_len
= 0, segment_offset
= 0, current_offset
= 0;
2101 struct lpfc_iocbq
*cmdiocbq
, *rspiocbq
;
2103 struct lpfc_sli_ct_request
*ctreq
;
2104 struct lpfc_dmabuf
*txbmp
;
2105 struct ulp_bde64
*txbpl
= NULL
;
2106 struct lpfc_dmabufext
*txbuffer
= NULL
;
2107 struct list_head head
;
2108 struct lpfc_dmabuf
*curr
;
2109 uint16_t txxri
, rxxri
;
2111 uint8_t *ptr
= NULL
, *rx_databuf
= NULL
;
2115 unsigned long flags
;
2116 void *dataout
= NULL
;
	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if (job->request_payload.payload_len !=
		job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	diag_mode = (struct diag_mode_test *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ;	/* plus the header */

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}

	if (full_size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
		 * then we allocate 64k and re-use that buffer over and over to
		 * xfer the whole block. This is because Linux kernel has a
		 * problem allocating more than 120k of kernel space memory. Saw
		 * problem with GET_FCPTARGETMAPPING...
		 */
		if (size <= (64 * 1024))
			total_mem = full_size;
		else
			total_mem = 64 * 1024;
	} else
		/* Allocate memory for ioctl data */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ptr, size);

	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		goto loopback_test_exit;

	rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
	if (rc) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		goto loopback_test_exit;
	}

	rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
	if (rc) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		goto loopback_test_exit;
	}
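	/*
	 * Loopback plumbing is now in place: the port has registered an RPI
	 * to itself, a transmit/receive exchange (XRI) pair has been obtained
	 * for that RPI, and receive buffers large enough for the CT header
	 * plus the user payload have been posted on the receive XRI, so the
	 * frame the port transmits to itself has somewhere to land.
	 */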
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		if (txbmp->virt) {
			INIT_LIST_HEAD(&txbmp->list);
			txbpl = (struct ulp_bde64 *) txbmp->virt;
			txbuffer = diag_cmd_data_alloc(phba,
							txbpl, full_size, 0);
		}
	}

	if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
		!txbmp->virt) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
			ptr + current_offset,
			segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
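	/*
	 * The transmit buffer is a chain of DMA segments described by the
	 * BPL built by diag_cmd_data_alloc(); the first segment begins with
	 * the ELX loopback CT header and the user payload is copied in
	 * behind it, continuing across the remaining segments.
	 */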
	/* Build the XMIT_SEQUENCE iocb */

	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = txxri;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					     rspiocbq, (phba->fc_ratov * 2) +
					     LPFC_DRVR_TIMEOUT);

	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
		rc = -EIO;
		goto err_loopback_test_exit;
	}
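	/*
	 * The transmitted sequence loops back and is delivered as an
	 * unsolicited CT frame; the driver's CT unsolicited event handler
	 * matches it to the FC_REG_CT_EVENT waiter registered above and
	 * queues the received data on evt->events_to_see before waking us.
	 */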
	time_left = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);

	if (list_empty(&evt->events_to_see))
		rc = (time_left) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			/* skip over elx loopback header */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			job->reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			job->reply->reply_payload_rcv_len = size;
		}
	}
err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (cmdiocbq != NULL)
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}
/**
 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
 * @job: GET_DFC_REV fc_bsg_job
 **/
static int
lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_mgmt_rev *event_req;
	struct get_mgmt_rev_reply *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2740 Received GET_DFC_REV request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_mgmt_rev *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_mgmt_rev_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2741 Received GET_DFC_REV reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;

job_error:
	job->reply->result = rc;
	if (rc == 0)
		job->job_done(job);
	return rc;
}
/**
 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_bsg_issue_mbox function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up thread waiting on the wait queue pointed by context1
 * of the mailbox.
 **/
static void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = pmboxq->context1;
	/* job already timed out? */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	/* build the outgoing buffer to do an sg copy
	 * the format is the response mailbox followed by any extended
	 * mailbox data
	 */
	from = (uint8_t *)&pmboxq->u.mb;
	to = (uint8_t *)dd_data->context_un.mbox.mb;
	memcpy(to, from, sizeof(MAILBOX_t));
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
		/* copy the extended data if any, count is in words */
		if (dd_data->context_un.mbox.outExtWLen) {
			from = (uint8_t *)dd_data->context_un.mbox.ext;
			to += sizeof(MAILBOX_t);
			size = dd_data->context_un.mbox.outExtWLen *
					sizeof(uint32_t);
			memcpy(to, from, size);
		} else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
			from = (uint8_t *)dd_data->context_un.mbox.
						dmp->dma.virt;
			to += sizeof(MAILBOX_t);
			size = dd_data->context_un.mbox.dmp->size;
			memcpy(to, from, size);
		} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
			(pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
			from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
						virt;
			to += sizeof(MAILBOX_t);
			size = pmboxq->u.mb.un.varWords[5];
			memcpy(to, from, size);
		} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
			(pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
			struct lpfc_mbx_nembed_cmd *nembed_sge =
				(struct lpfc_mbx_nembed_cmd *)
				&pmboxq->u.mb.un.varWords[0];

			from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
						virt;
			to += sizeof(MAILBOX_t);
			size = nembed_sge->sge[0].length;
			memcpy(to, from, size);
		} else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
			from = (uint8_t *)dd_data->context_un.
						mbox.dmp->dma.virt;
			to += sizeof(MAILBOX_t);
			size = dd_data->context_un.mbox.dmp->size;
			memcpy(to, from, size);
		}
	}
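	/*
	 * Where the copied length comes from differs per command: DUMP_MEMORY
	 * on SLI4 reports it in varWords[5], an embedded SLI4_CONFIG carries
	 * it in its first SGE length, and the BIU diag / event log cases use
	 * the size of the receive DMA buffer set up when the mailbox was
	 * issued.
	 */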
	from = (uint8_t *)dd_data->context_un.mbox.mb;
	job = dd_data->context_un.mbox.set_job;
	if (job) {
		size = job->reply_payload.payload_len;
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    from, size);
		job->reply->result = 0;
		job->dd_data = NULL;
	}
	dd_data->context_un.mbox.set_job = NULL;
	/* need to hold the lock until we call job done to hold off
	 * the timeout handler returning to the midlayer while
	 * we are still processing the job
	 */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	kfree(dd_data->context_un.mbox.mb);
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	kfree(dd_data->context_un.mbox.ext);
	if (dd_data->context_un.mbox.dmp) {
		dma_free_coherent(&phba->pcidev->dev,
				  dd_data->context_un.mbox.dmp->size,
				  dd_data->context_un.mbox.dmp->dma.virt,
				  dd_data->context_un.mbox.dmp->dma.phys);
		kfree(dd_data->context_un.mbox.dmp);
	}
	if (dd_data->context_un.mbox.rxbmp) {
		lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
			       dd_data->context_un.mbox.rxbmp->phys);
		kfree(dd_data->context_un.mbox.rxbmp);
	}
	kfree(dd_data);
	return;
}
/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_DUMP_CONTEXT:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
		break;
	case MBX_WRITE_VPARMS:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_LOAD_EXP_ROM:
	case MBX_DEL_LD_ENTRY:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1226 mbox: set_variable 0x%x, 0x%x\n",
			mb->un.varWords[0],
			mb->un.varWords[1]);
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
			&& (mb->un.varWords[1] == 1)) {
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
		}
		break;
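	/* The SETVAR_MLOMNT/SETVAR_MLORST words are Menlo (MLO) maintenance
	 * hooks: the first flags that a maintenance-mode transition is
	 * expected, the second clears the driver's loopback flag and resets
	 * the recorded topology, which appears intended to resynchronize
	 * driver state after a Menlo-initiated reset.
	 */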
	case MBX_READ_SPARM64:
	case MBX_READ_TOPOLOGY:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0;
}
/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object, mailbox command memory, get a mailbox
 * from the mailbox pool, copy the caller mailbox command.
 *
 * If offline and the sli is active we need to poll for the command (port is
 * being reset) and complete the job, otherwise issue the mailbox command and
 * let our completion handler finish the command.
 **/
static int
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
	struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
	/* a 4k buffer to hold the mb and extended data from/to the bsg */
	MAILBOX_t *mb = NULL;
	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
	struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
	struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
	struct ulp_bde64 *rxbpl = NULL;
	struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	uint8_t *ext = NULL;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* check if requested extended data lengths are valid */
	if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
	    (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
		rc = -ERANGE;
		goto job_done;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_done;
	}

	mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
	if (!mb) {
		rc = -ENOMEM;
		goto job_done;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_done;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  mb, size);

	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
	if (rc != 0)
		goto job_done; /* must be negative */

	pmb = &pmboxq->u.mb;
	memcpy(pmb, mb, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = vport;

	/* If HBA encountered an error attention, allow only DUMP
	 * or RESTART mailbox commands until the HBA is restarted.
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* Don't allow mailbox commands to be sent when blocked
	 * or when in the middle of discovery
	 */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_done;
	}

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
		if (!ext) {
			rc = -ENOMEM;
			goto job_done;
		}

		/* any data for the device? */
		if (mbox_req->inExtWLen) {
			from = (uint8_t *)mb;
			from += sizeof(MAILBOX_t);
			memcpy((uint8_t *)ext, from,
			       mbox_req->inExtWLen * sizeof(uint32_t));
		}

		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}
	/* biu diag will need a kernel buffer to transfer the data
	 * allocate our own buffer and setup the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		uint32_t transmit_length = pmb->un.varWords[1];
		uint32_t receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
			(transmit_length > MAILBOX_EXT_SIZE)) {
			rc = -ERANGE;
			goto job_done;
		}

		rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		INIT_LIST_HEAD(&rxbmp->list);
		rxbpl = (struct ulp_bde64 *) rxbmp->virt;
		dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);

		INIT_LIST_HEAD(&dmp->dma.list);
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmp->dma.phys);
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmp->dma.phys);

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmp->dma.phys +
				pmb->un.varBIUdiag.un.s2.
					xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmp->dma.phys +
				pmb->un.varBIUdiag.un.s2.
					xmit_bde64.tus.f.bdeSize);

		/* copy the transmit data found in the mailbox extension area */
		from = (uint8_t *)mb;
		from += sizeof(MAILBOX_t);
		memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
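		/* Rough layout of the single DMA area used for the BIU diag
		 * (sketch; the sizes come from the mailbox words):
		 *
		 *   dmp->dma.virt / dmp->dma.phys
		 *   +-------------------------+---------------------------+
		 *   | transmit data           | receive area              |
		 *   | (transmit_length bytes) | (receive_length bytes)    |
		 *   +-------------------------+---------------------------+
		 *   ^ xmit_bde64 points here  ^ rcv_bde64 = phys + xmit size
		 *
		 * so the completion handler can copy the received bytes
		 * straight out of dmp->dma.virt.
		 */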
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		struct READ_EVENT_LOG_VAR *rdEventLog =
			&pmb->un.varRdEventLog;
		uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		uint32_t mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > MAILBOX_EXT_SIZE) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			INIT_LIST_HEAD(&rxbmp->list);
			dmp = diag_cmd_data_alloc(phba, rxbpl,
						  receive_length, 0);

			INIT_LIST_HEAD(&dmp->dma.list);
			pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
			pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			uint32_t receive_length = pmb->un.varWords[2];
			/* receive length cannot be greater than mailbox
			 * extension size
			 */
			if ((receive_length == 0) ||
				(receive_length > MAILBOX_EXT_SIZE)) {
				rc = -ERANGE;
				goto job_done;
			}

			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
						  0);

			INIT_LIST_HEAD(&dmp->dma.list);
			pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
			pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			pmb->un.varUpdateCfg.co) {
			struct ulp_bde64 *bde =
				(struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
				rc = -ERANGE;
				goto job_done;
			}

			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			dmp = diag_cmd_data_alloc(phba, rxbpl,
						  bde->tus.f.bdeSize, 0);

			INIT_LIST_HEAD(&dmp->dma.list);
			bde->addrHigh = putPaddrHigh(dmp->dma.phys);
			bde->addrLow = putPaddrLow(dmp->dma.phys);

			/* copy the transmit data found in the mailbox
			 * extension area
			 */
			from = (uint8_t *)mb;
			from += sizeof(MAILBOX_t);
			memcpy((uint8_t *)dmp->dma.virt, from,
			       bde->tus.f.bdeSize);
		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
			struct lpfc_mbx_nembed_cmd *nembed_sge;
			struct mbox_header *header;
			uint32_t receive_length;

			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			header = (struct mbox_header *)&pmb->un.varWords[0];
			nembed_sge = (struct lpfc_mbx_nembed_cmd *)
				&pmb->un.varWords[0];
			receive_length = nembed_sge->sge[0].length;

			/* receive length cannot be greater than mailbox
			 * extension size
			 */
			if ((receive_length == 0) ||
				(receive_length > MAILBOX_EXT_SIZE)) {
				rc = -ERANGE;
				goto job_done;
			}

			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
						  0);

			INIT_LIST_HEAD(&dmp->dma.list);
			nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
			nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
			/* copy the transmit data found in the mailbox
			 * extension area
			 */
			from = (uint8_t *)mb;
			from += sizeof(MAILBOX_t);
			memcpy((uint8_t *)dmp->dma.virt, from,
			       header->cfg_mhdr.payload_length);
		}
	}
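	/*
	 * In each of the cases above the application supplied a command that
	 * expects a DMA buffer; the driver substitutes its own buffer (rxbmp/
	 * dmp) and patches the command's address words or SGE so the hardware
	 * writes into memory the completion handler can later copy back into
	 * the BSG reply payload.
	 */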
	dd_data->context_un.mbox.rxbmp = rxbmp;
	dd_data->context_un.mbox.dmp = dmp;

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;

	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = mb;
	dd_data->context_un.mbox.set_job = job;
	dd_data->context_un.mbox.ext = ext;
	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
	job->dd_data = dd_data;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
			goto job_done;
		}

		/* job finished, copy the data */
		memcpy(mb, pmb, sizeof(*pmb));
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    mb, size);
		/* not waiting mbox already done */
		rc = 0;
		goto job_done;
	}

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
		return 1; /* job started */
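	/*
	 * MBX_NOWAIT path: lpfc_bsg_wake_mbox_wait() will copy the response,
	 * complete the bsg job and release everything tracked by dd_data, so
	 * a successful submit returns to the caller here. Falling through
	 * means the submit failed and the common cleanup below runs.
	 */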
	/* common exit for error or job completed inline */
job_done:
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	if (dmp) {
		dma_free_coherent(&phba->pcidev->dev,
				  dmp->size, dmp->dma.virt,
				  dmp->dma.phys);
		kfree(dmp);
	}
	if (rxbmp) {
		lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}
	kfree(mb);
	kfree(ext);
	kfree(dd_data);

	return rc;
}
/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
 **/
static int
lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2737 Received MBOX_REQ request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
		rc = -EINVAL;
		goto job_error;
	}

	if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
		rc = -EINVAL;
		goto job_error;
	}

	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_error;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);

	if (rc == 0) {
		job->reply->result = 0;
		job->dd_data = NULL;
		job->job_done(job);
	} else if (rc == 1)
		/* job submitted, will complete later*/
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		job->reply->result = rc;
		job->dd_data = NULL;
	}

	return rc;

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_menlo_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	struct lpfc_dmabuf *bmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	menlo = &dd_data->context_un.menlo;
	job = menlo->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock(&phba->hbalock);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock(&phba->hbalock);

	bmp = menlo->bmp;
	rspiocbq = menlo->rspiocbq;
	rsp = &rspiocbq->iocb;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	/* always return the xri, this would be used in the case
	 * of a menlo download to allow the data to be sent as a continuation
	 * of the exchange.
	 */
	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	menlo_resp->xri = rsp->ulpContext;
	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_menlo_cmd - send an ioctl for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the exchange
 * supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	struct menlo_command *menlo_cmd;
	struct menlo_response *menlo_resp;
	struct lpfc_dmabuf *bmp = NULL;
	struct scatterlist *sgel = NULL;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
		sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
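	/* The BPL in bmp->virt now describes the request scatterlist followed
	 * immediately by the reply scatterlist, so a single buffer pointer
	 * list covers both the transmit payload and the receive buffers of
	 * the GEN_REQUEST64 exchange.
	 */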
	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* Limited Edition */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context2 = rspiocbq;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}
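	/* MENLO_CMD opens a new exchange to the Menlo DID (GEN_REQUEST64_CR);
	 * MENLO_DATA continues the exchange whose xri the application got
	 * back from an earlier command's response (GEN_REQUEST64_CX), which
	 * is how a Menlo download is streamed across multiple BSG requests.
	 */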
	dd_data->type = TYPE_MENLO;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rspiocbq = rspiocbq;
	dd_data->context_un.menlo.set_job = job;
	dd_data->context_un.menlo.bmp = bmp;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
		MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_TEST:
		rc = lpfc_bsg_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB. The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_bsg_menlo *menlo;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
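		/* Aborting the outstanding iocb makes the firmware complete
		 * it; that completion runs the normal handler, which sees the
		 * -EAGAIN result set above and finishes the bsg job back to
		 * the midlayer.
		 */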
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signalling the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		job->reply->result = -EAGAIN;
		/* the mbox completion handler can now be run */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MENLO:
		menlo = &dd_data->context_un.menlo;
		cmdiocb = menlo->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return 0;
}