/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2009-2010 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 *******************************************************************/
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
struct lpfc_bsg_event {
        struct list_head node;
        struct kref kref;
        wait_queue_head_t wq;

        /* Event type and waiter identifiers */
        uint32_t type_mask;
        uint32_t req_id;
        uint32_t reg_id;

        /* next two flags are here for the auto-delete logic */
        unsigned long wait_time_stamp;

        /* seen and not seen events */
        struct list_head events_to_get;
        struct list_head events_to_see;

        /* job waiting for this event to finish */
        struct fc_bsg_job *set_job;
};
struct lpfc_bsg_iocb {
        struct lpfc_iocbq *cmdiocbq;
        struct lpfc_iocbq *rspiocbq;
        struct lpfc_dmabuf *bmp;
        struct lpfc_nodelist *ndlp;

        /* job waiting for this iocb to finish */
        struct fc_bsg_job *set_job;
};
struct lpfc_bsg_mbox {
        struct lpfc_dmabuf *rxbmp; /* for BIU diags */
        struct lpfc_dmabufext *dmp; /* for BIU diags */
        uint8_t *ext; /* extended mailbox data */
        uint32_t mbOffset; /* from app */
        uint32_t inExtWLen; /* from app */
        uint32_t outExtWLen; /* from app */

        /* job waiting for this mbox command to finish */
        struct fc_bsg_job *set_job;
};
#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
        struct lpfc_iocbq *cmdiocbq;
        struct lpfc_iocbq *rspiocbq;
        struct lpfc_dmabuf *bmp;

        /* job waiting for this iocb to finish */
        struct fc_bsg_job *set_job;
};
struct bsg_job_data {
        uint32_t type;
        union {
                struct lpfc_bsg_event *evt;
                struct lpfc_bsg_iocb iocb;
                struct lpfc_bsg_mbox mbox;
                struct lpfc_bsg_menlo menlo;
        } context_un;
};

struct event_data {
        struct list_head node;
        uint32_t type;
        uint32_t immed_dat;
        void *data;
        uint32_t len;
};
#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
        ELX_LOOPBACK_XRI_SETUP,
        ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
        (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
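/*
 * Editorial note (hedged): ELX_LOOPBACK_HEADER_SZ above is the byte offset of
 * the 'un' payload union inside struct lpfc_sli_ct_request, computed through
 * NULL-pointer arithmetic.  A minimal equivalent sketch using the standard
 * offsetof() macro would be:
 *
 *      #include <linux/stddef.h>
 *
 *      #define ELX_LOOPBACK_HEADER_SZ \
 *              offsetof(struct lpfc_sli_ct_request, un)
 *
 * offsetof() expands to the same arithmetic, so the two forms are
 * interchangeable here; the open-coded form is what the driver uses.
 */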
struct lpfc_dmabufext {
        struct lpfc_dmabuf dma;
        uint32_t size;
        uint32_t flag;
};
/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
{
        struct bsg_job_data *dd_data;
        struct fc_bsg_job *job;
        struct lpfc_dmabuf *bmp;
        struct lpfc_nodelist *ndlp;
        struct lpfc_bsg_iocb *iocb;

        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        dd_data = cmdiocbq->context2;
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
        lpfc_sli_release_iocbq(phba, cmdiocbq);

        iocb = &dd_data->context_un.iocb;
        job->dd_data = NULL; /* so timeout handler does not reply */

        rsp = &rspiocbq->iocb;
        ndlp = cmdiocbq->context1;

        pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
                     job->request_payload.sg_cnt, DMA_TO_DEVICE);
        pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
                     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

        if (rsp->ulpStatus) {
                if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
                        switch (rsp->un.ulpWord[4] & 0xff) {
                        case IOERR_SEQUENCE_TIMEOUT:
                        case IOERR_INVALID_RPI:
                job->reply->reply_payload_rcv_len =
                        rsp->un.genreq64.bdl.bdeSize;

        lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
        lpfc_sli_release_iocbq(phba, cmdiocbq);

        /* make error code available to userspace */
        job->reply->result = rc;
        /* complete the job back to userspace */
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
        struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_rport_data *rdata = job->rport->dd_data;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct ulp_bde64 *bpl = NULL;
        struct lpfc_iocbq *cmdiocbq = NULL;
        struct lpfc_dmabuf *bmp = NULL;
        struct scatterlist *sgel = NULL;
        struct bsg_job_data *dd_data;

        /* in case no data is transferred */
        job->reply->reply_payload_rcv_len = 0;

        /* allocate our bsg tracking structure */
        dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2733 Failed allocation of dd_data\n");

        if (!lpfc_nlp_get(ndlp)) {

        bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

        if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {

        cmdiocbq = lpfc_sli_get_iocbq(phba);

        cmd = &cmdiocbq->iocb;
        bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);

        INIT_LIST_HEAD(&bmp->list);
        bpl = (struct ulp_bde64 *) bmp->virt;
        request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
                                  job->request_payload.sg_cnt, DMA_TO_DEVICE);
        for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
                busaddr = sg_dma_address(sgel);
                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl->tus.f.bdeSize = sg_dma_len(sgel);
                bpl->tus.w = cpu_to_le32(bpl->tus.w);
                bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
                bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
        }

        reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
                                job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
                busaddr = sg_dma_address(sgel);
                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
                bpl->tus.f.bdeSize = sg_dma_len(sgel);
                bpl->tus.w = cpu_to_le32(bpl->tus.w);
                bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
                bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
        }

        cmd->un.genreq64.bdl.ulpIoTag32 = 0;
        cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
        cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
        cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
        cmd->un.genreq64.bdl.bdeSize =
                (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
        cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
        cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
        cmd->un.genreq64.w5.hcsw.Dfctl = 0;
        cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
        cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
        cmd->ulpBdeCount = 1;
        cmd->ulpClass = CLASS3;
        cmd->ulpContext = ndlp->nlp_rpi;
        cmd->ulpOwner = OWN_CHIP;
        cmdiocbq->vport = phba->pport;
        cmdiocbq->context3 = bmp;
        cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
        timeout = phba->fc_ratov * 2;
        cmd->ulpTimeout = timeout;

        cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
        cmdiocbq->context1 = ndlp;
        cmdiocbq->context2 = dd_data;
        dd_data->type = TYPE_IOCB;
        dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
        dd_data->context_un.iocb.set_job = job;
        dd_data->context_un.iocb.bmp = bmp;

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                creg_val = readl(phba->HCregaddr);
                creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
        if (iocb_stat == IOCB_SUCCESS)
                return 0; /* done for now */
        else if (iocb_stat == IOCB_BUSY)

        /* iocb failed so cleanup */
        pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
                     job->request_payload.sg_cnt, DMA_TO_DEVICE);
        pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
                     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

        lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

        lpfc_sli_release_iocbq(phba, cmdiocbq);

        /* make error code available to userspace */
        job->reply->result = rc;
}
/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
{
        struct bsg_job_data *dd_data;
        struct fc_bsg_job *job;
        struct lpfc_nodelist *ndlp;
        struct lpfc_dmabuf *pbuflist = NULL;
        struct fc_bsg_ctels_reply *els_reply;

        spin_lock_irqsave(&phba->ct_ev_lock, flags);
        dd_data = cmdiocbq->context1;
        /* normal completion and timeout crossed paths, already done */
                spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

        cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
        if (cmdiocbq->context2 && rspiocbq)
                memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
                       &rspiocbq->iocb, sizeof(IOCB_t));

        job = dd_data->context_un.iocb.set_job;
        cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
        rspiocbq = dd_data->context_un.iocb.rspiocbq;
        rsp = &rspiocbq->iocb;
        ndlp = dd_data->context_un.iocb.ndlp;

        pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
                     job->request_payload.sg_cnt, DMA_TO_DEVICE);
        pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
                     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

        if (job->reply->result == -EAGAIN)
        else if (rsp->ulpStatus == IOSTAT_SUCCESS)
                job->reply->reply_payload_rcv_len =
                        rsp->un.elsreq64.bdl.bdeSize;
        else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
                job->reply->reply_payload_rcv_len =
                        sizeof(struct fc_bsg_ctels_reply);
                /* LS_RJT data returned in word 4 */
                rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
                els_reply = &job->reply->reply_data.ctels_reply;
                els_reply->status = FC_CTELS_STATUS_REJECT;
                els_reply->rjt_data.action = rjt_data[3];
                els_reply->rjt_data.reason_code = rjt_data[2];
                els_reply->rjt_data.reason_explanation = rjt_data[1];
                els_reply->rjt_data.vendor_unique = rjt_data[0];
        }

        pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
        lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
        lpfc_sli_release_iocbq(phba, rspiocbq);
        lpfc_sli_release_iocbq(phba, cmdiocbq);

        /* make error code available to userspace */
        job->reply->result = rc;
        /* complete the job back to userspace */
        spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
}
/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
        struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_rport_data *rdata = job->rport->dd_data;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct lpfc_iocbq *rspiocbq;
        struct lpfc_iocbq *cmdiocbq;
        struct lpfc_dmabuf *pcmd;
        struct lpfc_dmabuf *prsp;
        struct lpfc_dmabuf *pbuflist = NULL;
        struct ulp_bde64 *bpl;
        struct scatterlist *sgel = NULL;
        struct bsg_job_data *dd_data;

        /* in case no data is transferred */
        job->reply->reply_payload_rcv_len = 0;

        /* allocate our bsg tracking structure */
        dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
                lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
                                "2735 Failed allocation of dd_data\n");

        if (!lpfc_nlp_get(ndlp)) {

        elscmd = job->request->rqst_data.r_els.els_code;
        cmdsize = job->request_payload.payload_len;
        rspsize = job->reply_payload.payload_len;
        rspiocbq = lpfc_sli_get_iocbq(phba);

        rsp = &rspiocbq->iocb;

        cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
                                      ndlp->nlp_DID, elscmd);

        /* prep els iocb set context1 to the ndlp, context2 to the command
         * dmabuf, context3 holds the data dmabuf
         */
        pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
        prsp = (struct lpfc_dmabuf *) pcmd->list.next;
        lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
        lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
        cmdiocbq->context2 = NULL;

        pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
        bpl = (struct ulp_bde64 *) pbuflist->virt;

        request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
                                  job->request_payload.sg_cnt, DMA_TO_DEVICE);
        for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
                busaddr = sg_dma_address(sgel);
                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                bpl->tus.f.bdeSize = sg_dma_len(sgel);
                bpl->tus.w = cpu_to_le32(bpl->tus.w);
                bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
                bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
        }

        reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
                                job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
        for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
                busaddr = sg_dma_address(sgel);
                bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
                bpl->tus.f.bdeSize = sg_dma_len(sgel);
                bpl->tus.w = cpu_to_le32(bpl->tus.w);
                bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
                bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
        }

        cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
                (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
        cmdiocbq->iocb.ulpContext = rpi;
        cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
        cmdiocbq->context1 = NULL;
        cmdiocbq->context2 = NULL;

        cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
        cmdiocbq->context1 = dd_data;
        cmdiocbq->context2 = rspiocbq;
        dd_data->type = TYPE_IOCB;
        dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
        dd_data->context_un.iocb.rspiocbq = rspiocbq;
        dd_data->context_un.iocb.set_job = job;
        dd_data->context_un.iocb.bmp = NULL;
        dd_data->context_un.iocb.ndlp = ndlp;

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                creg_val = readl(phba->HCregaddr);
                creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
        if (rc == IOCB_SUCCESS)
                return 0; /* done for now */
        else if (rc == IOCB_BUSY)

        pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
                     job->request_payload.sg_cnt, DMA_TO_DEVICE);
        pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
                     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

        lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);

        lpfc_sli_release_iocbq(phba, cmdiocbq);

        lpfc_sli_release_iocbq(phba, rspiocbq);

        /* make error code available to userspace */
        job->reply->result = rc;
}
/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
        struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
                                                  kref);
        struct event_data *ed;

        list_del(&evt->node);

        while (!list_empty(&evt->events_to_get)) {
                ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
                list_del(&ed->node);
                kfree(ed->data);
                kfree(ed);
        }

        while (!list_empty(&evt->events_to_see)) {
                ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
                list_del(&ed->node);
                kfree(ed->data);
                kfree(ed);
        }

        kfree(evt);
}
/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
        kref_get(&evt->kref);
}
/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
        kref_put(&evt->kref, lpfc_bsg_event_free);
}
/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
        struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

        if (!evt)
                return NULL;

        INIT_LIST_HEAD(&evt->events_to_get);
        INIT_LIST_HEAD(&evt->events_to_see);
        evt->type_mask = ev_mask;
        evt->req_id = ev_req_id;
        evt->reg_id = ev_reg_id;
        evt->wait_time_stamp = jiffies;
        init_waitqueue_head(&evt->wq);
        kref_init(&evt->kref);

        return evt;
}
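/*
 * Editorial note (hedged usage sketch): lpfc_bsg_event objects allocated by
 * lpfc_bsg_event_new() are reference counted through the kref initialised
 * above.  The pattern used elsewhere in this file is to publish the event on
 * phba->ct_ev_waiters and take a waiter reference under ct_ev_lock, then drop
 * the references the same way when done:
 *
 *      evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
 *                               SLI_CT_ELX_LOOPBACK);
 *      if (!evt)
 *              return -ENOMEM;
 *
 *      spin_lock_irqsave(&phba->ct_ev_lock, flags);
 *      list_add(&evt->node, &phba->ct_ev_waiters);
 *      lpfc_bsg_event_ref(evt);
 *      spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 *
 *      ...
 *
 *      spin_lock_irqsave(&phba->ct_ev_lock, flags);
 *      lpfc_bsg_event_unref(evt);      release the waiter reference
 *      lpfc_bsg_event_unref(evt);      drop the allocation reference
 *      spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 *
 * The final kref_put() invokes lpfc_bsg_event_free(), which unlinks the event
 * and frees any queued event_data.
 */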
/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
        struct lpfc_dmabufext *mlast;
        struct pci_dev *pcidev;
        struct list_head head, *curr, *next;

        if ((!mlist) || (!lpfc_is_link_up(phba) &&
                (phba->link_flag & LS_LOOPBACK_MODE))) {
                return 0;
        }

        pcidev = phba->pcidev;
        list_add_tail(&head, &mlist->dma.list);

        list_for_each_safe(curr, next, &head) {
                mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
                dma_free_coherent(&pcidev->dev,
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registered to receive CT events.
 **/
759 lpfc_bsg_ct_unsol_event(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
760 struct lpfc_iocbq
*piocbq
)
762 uint32_t evt_req_id
= 0;
765 struct lpfc_dmabuf
*dmabuf
= NULL
;
766 struct lpfc_bsg_event
*evt
;
767 struct event_data
*evt_dat
= NULL
;
768 struct lpfc_iocbq
*iocbq
;
770 struct list_head head
;
771 struct ulp_bde64
*bde
;
774 struct lpfc_dmabuf
*bdeBuf1
= piocbq
->context2
;
775 struct lpfc_dmabuf
*bdeBuf2
= piocbq
->context3
;
776 struct lpfc_hbq_entry
*hbqe
;
777 struct lpfc_sli_ct_request
*ct_req
;
778 struct fc_bsg_job
*job
= NULL
;
782 INIT_LIST_HEAD(&head
);
783 list_add_tail(&head
, &piocbq
->list
);
785 if (piocbq
->iocb
.ulpBdeCount
== 0 ||
786 piocbq
->iocb
.un
.cont64
[0].tus
.f
.bdeSize
== 0)
787 goto error_ct_unsol_exit
;
789 if (phba
->link_state
== LPFC_HBA_ERROR
||
790 (!(phba
->sli
.sli_flag
& LPFC_SLI_ACTIVE
)))
791 goto error_ct_unsol_exit
;
793 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
)
796 dma_addr
= getPaddr(piocbq
->iocb
.un
.cont64
[0].addrHigh
,
797 piocbq
->iocb
.un
.cont64
[0].addrLow
);
798 dmabuf
= lpfc_sli_ringpostbuf_get(phba
, pring
, dma_addr
);
801 goto error_ct_unsol_exit
;
802 ct_req
= (struct lpfc_sli_ct_request
*)dmabuf
->virt
;
803 evt_req_id
= ct_req
->FsType
;
804 cmd
= ct_req
->CommandResponse
.bits
.CmdRsp
;
805 len
= ct_req
->CommandResponse
.bits
.Size
;
806 if (!(phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
))
807 lpfc_sli_ringpostbuf_put(phba
, pring
, dmabuf
);
809 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
810 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
811 if (!(evt
->type_mask
& FC_REG_CT_EVENT
) ||
812 evt
->req_id
!= evt_req_id
)
815 lpfc_bsg_event_ref(evt
);
816 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
817 evt_dat
= kzalloc(sizeof(*evt_dat
), GFP_KERNEL
);
818 if (evt_dat
== NULL
) {
819 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
820 lpfc_bsg_event_unref(evt
);
821 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
822 "2614 Memory allocation failed for "
827 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
828 /* take accumulated byte count from the last iocbq */
829 iocbq
= list_entry(head
.prev
, typeof(*iocbq
), list
);
830 evt_dat
->len
= iocbq
->iocb
.unsli3
.rcvsli3
.acc_len
;
832 list_for_each_entry(iocbq
, &head
, list
) {
833 for (i
= 0; i
< iocbq
->iocb
.ulpBdeCount
; i
++)
835 iocbq
->iocb
.un
.cont64
[i
].tus
.f
.bdeSize
;
839 evt_dat
->data
= kzalloc(evt_dat
->len
, GFP_KERNEL
);
840 if (evt_dat
->data
== NULL
) {
841 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
842 "2615 Memory allocation failed for "
843 "CT event data, size %d\n",
846 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
847 lpfc_bsg_event_unref(evt
);
848 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
849 goto error_ct_unsol_exit
;
852 list_for_each_entry(iocbq
, &head
, list
) {
854 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
855 bdeBuf1
= iocbq
->context2
;
856 bdeBuf2
= iocbq
->context3
;
858 for (i
= 0; i
< iocbq
->iocb
.ulpBdeCount
; i
++) {
859 if (phba
->sli3_options
&
860 LPFC_SLI3_HBQ_ENABLED
) {
862 hbqe
= (struct lpfc_hbq_entry
*)
863 &iocbq
->iocb
.un
.ulpWord
[0];
864 size
= hbqe
->bde
.tus
.f
.bdeSize
;
867 hbqe
= (struct lpfc_hbq_entry
*)
870 size
= hbqe
->bde
.tus
.f
.bdeSize
;
873 if ((offset
+ size
) > evt_dat
->len
)
874 size
= evt_dat
->len
- offset
;
876 size
= iocbq
->iocb
.un
.cont64
[i
].
878 bde
= &iocbq
->iocb
.un
.cont64
[i
];
879 dma_addr
= getPaddr(bde
->addrHigh
,
881 dmabuf
= lpfc_sli_ringpostbuf_get(phba
,
885 lpfc_printf_log(phba
, KERN_ERR
,
886 LOG_LIBDFC
, "2616 No dmabuf "
887 "found for iocbq 0x%p\n",
889 kfree(evt_dat
->data
);
891 spin_lock_irqsave(&phba
->ct_ev_lock
,
893 lpfc_bsg_event_unref(evt
);
894 spin_unlock_irqrestore(
895 &phba
->ct_ev_lock
, flags
);
896 goto error_ct_unsol_exit
;
898 memcpy((char *)(evt_dat
->data
) + offset
,
901 if (evt_req_id
!= SLI_CT_ELX_LOOPBACK
&&
902 !(phba
->sli3_options
&
903 LPFC_SLI3_HBQ_ENABLED
)) {
904 lpfc_sli_ringpostbuf_put(phba
, pring
,
908 case ELX_LOOPBACK_DATA
:
909 diag_cmd_data_free(phba
,
910 (struct lpfc_dmabufext
*)
913 case ELX_LOOPBACK_XRI_SETUP
:
914 if ((phba
->sli_rev
==
916 (phba
->sli3_options
&
917 LPFC_SLI3_HBQ_ENABLED
919 lpfc_in_buf_free(phba
,
922 lpfc_post_buffer(phba
,
928 if (!(phba
->sli3_options
&
929 LPFC_SLI3_HBQ_ENABLED
))
930 lpfc_post_buffer(phba
,
939 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
940 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
941 evt_dat
->immed_dat
= phba
->ctx_idx
;
942 phba
->ctx_idx
= (phba
->ctx_idx
+ 1) % 64;
943 /* Provide warning for over-run of the ct_ctx array */
944 if (phba
->ct_ctx
[evt_dat
->immed_dat
].flags
&
946 lpfc_printf_log(phba
, KERN_WARNING
, LOG_ELS
,
947 "2717 CT context array entry "
948 "[%d] over-run: oxid:x%x, "
949 "sid:x%x\n", phba
->ctx_idx
,
951 evt_dat
->immed_dat
].oxid
,
953 evt_dat
->immed_dat
].SID
);
954 phba
->ct_ctx
[evt_dat
->immed_dat
].oxid
=
955 piocbq
->iocb
.ulpContext
;
956 phba
->ct_ctx
[evt_dat
->immed_dat
].SID
=
957 piocbq
->iocb
.un
.rcvels
.remoteID
;
958 phba
->ct_ctx
[evt_dat
->immed_dat
].flags
= UNSOL_VALID
;
960 evt_dat
->immed_dat
= piocbq
->iocb
.ulpContext
;
962 evt_dat
->type
= FC_REG_CT_EVENT
;
963 list_add(&evt_dat
->node
, &evt
->events_to_see
);
964 if (evt_req_id
== SLI_CT_ELX_LOOPBACK
) {
965 wake_up_interruptible(&evt
->wq
);
966 lpfc_bsg_event_unref(evt
);
970 list_move(evt
->events_to_see
.prev
, &evt
->events_to_get
);
971 lpfc_bsg_event_unref(evt
);
976 job
->reply
->reply_payload_rcv_len
= size
;
977 /* make error code available to userspace */
978 job
->reply
->result
= 0;
980 /* complete the job back to userspace */
981 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
983 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
986 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
989 if (!list_empty(&head
))
991 if (evt_req_id
== SLI_CT_ELX_LOOPBACK
)
997 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
998 * @job: SET_EVENT fc_bsg_job
1001 lpfc_bsg_hba_set_event(struct fc_bsg_job
*job
)
1003 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
1004 struct lpfc_hba
*phba
= vport
->phba
;
1005 struct set_ct_event
*event_req
;
1006 struct lpfc_bsg_event
*evt
;
1008 struct bsg_job_data
*dd_data
= NULL
;
1010 unsigned long flags
;
1012 if (job
->request_len
<
1013 sizeof(struct fc_bsg_request
) + sizeof(struct set_ct_event
)) {
1014 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1015 "2612 Received SET_CT_EVENT below minimum "
1021 dd_data
= kmalloc(sizeof(struct bsg_job_data
), GFP_KERNEL
);
1022 if (dd_data
== NULL
) {
1023 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1024 "2734 Failed allocation of dd_data\n");
1029 event_req
= (struct set_ct_event
*)
1030 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
1031 ev_mask
= ((uint32_t)(unsigned long)event_req
->type_mask
&
1033 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1034 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
1035 if (evt
->reg_id
== event_req
->ev_reg_id
) {
1036 lpfc_bsg_event_ref(evt
);
1037 evt
->wait_time_stamp
= jiffies
;
1041 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1043 if (&evt
->node
== &phba
->ct_ev_waiters
) {
1044 /* no event waiting struct yet - first call */
1045 evt
= lpfc_bsg_event_new(ev_mask
, event_req
->ev_reg_id
,
1046 event_req
->ev_req_id
);
1048 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1049 "2617 Failed allocation of event "
1055 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1056 list_add(&evt
->node
, &phba
->ct_ev_waiters
);
1057 lpfc_bsg_event_ref(evt
);
1058 evt
->wait_time_stamp
= jiffies
;
1059 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1062 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1064 dd_data
->type
= TYPE_EVT
;
1065 dd_data
->context_un
.evt
= evt
;
1066 evt
->set_job
= job
; /* for unsolicited command */
1067 job
->dd_data
= dd_data
; /* for fc transport timeout callback*/
1068 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1069 return 0; /* call job done later */
1072 if (dd_data
!= NULL
)
1075 job
->dd_data
= NULL
;
1080 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1081 * @job: GET_EVENT fc_bsg_job
1084 lpfc_bsg_hba_get_event(struct fc_bsg_job
*job
)
1086 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
1087 struct lpfc_hba
*phba
= vport
->phba
;
1088 struct get_ct_event
*event_req
;
1089 struct get_ct_event_reply
*event_reply
;
1090 struct lpfc_bsg_event
*evt
;
1091 struct event_data
*evt_dat
= NULL
;
1092 unsigned long flags
;
1095 if (job
->request_len
<
1096 sizeof(struct fc_bsg_request
) + sizeof(struct get_ct_event
)) {
1097 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1098 "2613 Received GET_CT_EVENT request below "
1104 event_req
= (struct get_ct_event
*)
1105 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
1107 event_reply
= (struct get_ct_event_reply
*)
1108 job
->reply
->reply_data
.vendor_reply
.vendor_rsp
;
1109 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1110 list_for_each_entry(evt
, &phba
->ct_ev_waiters
, node
) {
1111 if (evt
->reg_id
== event_req
->ev_reg_id
) {
1112 if (list_empty(&evt
->events_to_get
))
1114 lpfc_bsg_event_ref(evt
);
1115 evt
->wait_time_stamp
= jiffies
;
1116 evt_dat
= list_entry(evt
->events_to_get
.prev
,
1117 struct event_data
, node
);
1118 list_del(&evt_dat
->node
);
1122 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1124 /* The app may continue to ask for event data until it gets
1125 * an error indicating that there isn't anymore
1127 if (evt_dat
== NULL
) {
1128 job
->reply
->reply_payload_rcv_len
= 0;
1133 if (evt_dat
->len
> job
->request_payload
.payload_len
) {
1134 evt_dat
->len
= job
->request_payload
.payload_len
;
1135 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1136 "2618 Truncated event data at %d "
1138 job
->request_payload
.payload_len
);
1141 event_reply
->type
= evt_dat
->type
;
1142 event_reply
->immed_data
= evt_dat
->immed_dat
;
1143 if (evt_dat
->len
> 0)
1144 job
->reply
->reply_payload_rcv_len
=
1145 sg_copy_from_buffer(job
->request_payload
.sg_list
,
1146 job
->request_payload
.sg_cnt
,
1147 evt_dat
->data
, evt_dat
->len
);
1149 job
->reply
->reply_payload_rcv_len
= 0;
1152 kfree(evt_dat
->data
);
1156 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1157 lpfc_bsg_event_unref(evt
);
1158 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1159 job
->dd_data
= NULL
;
1160 job
->reply
->result
= 0;
1165 job
->dd_data
= NULL
;
1166 job
->reply
->result
= rc
;
/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
1188 lpfc_issue_ct_rsp_cmp(struct lpfc_hba
*phba
,
1189 struct lpfc_iocbq
*cmdiocbq
,
1190 struct lpfc_iocbq
*rspiocbq
)
1192 struct bsg_job_data
*dd_data
;
1193 struct fc_bsg_job
*job
;
1195 struct lpfc_dmabuf
*bmp
;
1196 struct lpfc_nodelist
*ndlp
;
1197 unsigned long flags
;
1200 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1201 dd_data
= cmdiocbq
->context2
;
1202 /* normal completion and timeout crossed paths, already done */
1204 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1208 job
= dd_data
->context_un
.iocb
.set_job
;
1209 bmp
= dd_data
->context_un
.iocb
.bmp
;
1210 rsp
= &rspiocbq
->iocb
;
1211 ndlp
= dd_data
->context_un
.iocb
.ndlp
;
1213 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
1214 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1216 if (rsp
->ulpStatus
) {
1217 if (rsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
) {
1218 switch (rsp
->un
.ulpWord
[4] & 0xff) {
1219 case IOERR_SEQUENCE_TIMEOUT
:
1222 case IOERR_INVALID_RPI
:
1232 job
->reply
->reply_payload_rcv_len
=
1233 rsp
->un
.genreq64
.bdl
.bdeSize
;
1235 lpfc_mbuf_free(phba
, bmp
->virt
, bmp
->phys
);
1236 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
1240 /* make error code available to userspace */
1241 job
->reply
->result
= rc
;
1242 job
->dd_data
= NULL
;
1243 /* complete the job back to userspace */
1245 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
1258 lpfc_issue_ct_rsp(struct lpfc_hba
*phba
, struct fc_bsg_job
*job
, uint32_t tag
,
1259 struct lpfc_dmabuf
*bmp
, int num_entry
)
1262 struct lpfc_iocbq
*ctiocb
= NULL
;
1264 struct lpfc_nodelist
*ndlp
= NULL
;
1265 struct bsg_job_data
*dd_data
;
1268 /* allocate our bsg tracking structure */
1269 dd_data
= kmalloc(sizeof(struct bsg_job_data
), GFP_KERNEL
);
1271 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1272 "2736 Failed allocation of dd_data\n");
1277 /* Allocate buffer for command iocb */
1278 ctiocb
= lpfc_sli_get_iocbq(phba
);
1284 icmd
= &ctiocb
->iocb
;
1285 icmd
->un
.xseq64
.bdl
.ulpIoTag32
= 0;
1286 icmd
->un
.xseq64
.bdl
.addrHigh
= putPaddrHigh(bmp
->phys
);
1287 icmd
->un
.xseq64
.bdl
.addrLow
= putPaddrLow(bmp
->phys
);
1288 icmd
->un
.xseq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
1289 icmd
->un
.xseq64
.bdl
.bdeSize
= (num_entry
* sizeof(struct ulp_bde64
));
1290 icmd
->un
.xseq64
.w5
.hcsw
.Fctl
= (LS
| LA
);
1291 icmd
->un
.xseq64
.w5
.hcsw
.Dfctl
= 0;
1292 icmd
->un
.xseq64
.w5
.hcsw
.Rctl
= FC_RCTL_DD_SOL_CTL
;
1293 icmd
->un
.xseq64
.w5
.hcsw
.Type
= FC_TYPE_CT
;
1295 /* Fill in rest of iocb */
1296 icmd
->ulpCommand
= CMD_XMIT_SEQUENCE64_CX
;
1297 icmd
->ulpBdeCount
= 1;
1299 icmd
->ulpClass
= CLASS3
;
1300 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
1301 /* Do not issue unsol response if oxid not marked as valid */
1302 if (!(phba
->ct_ctx
[tag
].flags
& UNSOL_VALID
)) {
1304 goto issue_ct_rsp_exit
;
1306 icmd
->ulpContext
= phba
->ct_ctx
[tag
].oxid
;
1307 ndlp
= lpfc_findnode_did(phba
->pport
, phba
->ct_ctx
[tag
].SID
);
1309 lpfc_printf_log(phba
, KERN_WARNING
, LOG_ELS
,
1310 "2721 ndlp null for oxid %x SID %x\n",
1312 phba
->ct_ctx
[tag
].SID
);
1314 goto issue_ct_rsp_exit
;
1317 /* Check if the ndlp is active */
1318 if (!ndlp
|| !NLP_CHK_NODE_ACT(ndlp
)) {
1320 goto issue_ct_rsp_exit
;
/* get a reference count so the ndlp doesn't go away while
1326 if (!lpfc_nlp_get(ndlp
)) {
1328 goto issue_ct_rsp_exit
;
1331 icmd
->un
.ulpWord
[3] = ndlp
->nlp_rpi
;
1332 /* The exchange is done, mark the entry as invalid */
1333 phba
->ct_ctx
[tag
].flags
&= ~UNSOL_VALID
;
1335 icmd
->ulpContext
= (ushort
) tag
;
1337 icmd
->ulpTimeout
= phba
->fc_ratov
* 2;
1339 /* Xmit CT response on exchange <xid> */
1340 lpfc_printf_log(phba
, KERN_INFO
, LOG_ELS
,
1341 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
1342 icmd
->ulpContext
, icmd
->ulpIoTag
, phba
->link_state
);
1344 ctiocb
->iocb_cmpl
= NULL
;
1345 ctiocb
->iocb_flag
|= LPFC_IO_LIBDFC
;
1346 ctiocb
->vport
= phba
->pport
;
1347 ctiocb
->context3
= bmp
;
1349 ctiocb
->iocb_cmpl
= lpfc_issue_ct_rsp_cmp
;
1350 ctiocb
->context2
= dd_data
;
1351 ctiocb
->context1
= ndlp
;
1352 dd_data
->type
= TYPE_IOCB
;
1353 dd_data
->context_un
.iocb
.cmdiocbq
= ctiocb
;
1354 dd_data
->context_un
.iocb
.rspiocbq
= NULL
;
1355 dd_data
->context_un
.iocb
.set_job
= job
;
1356 dd_data
->context_un
.iocb
.bmp
= bmp
;
1357 dd_data
->context_un
.iocb
.ndlp
= ndlp
;
1359 if (phba
->cfg_poll
& DISABLE_FCP_RING_INT
) {
1360 creg_val
= readl(phba
->HCregaddr
);
1361 creg_val
|= (HC_R0INT_ENA
<< LPFC_FCP_RING
);
1362 writel(creg_val
, phba
->HCregaddr
);
1363 readl(phba
->HCregaddr
); /* flush */
1366 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, ctiocb
, 0);
1368 if (rc
== IOCB_SUCCESS
)
1369 return 0; /* done for now */
1372 lpfc_sli_release_iocbq(phba
, ctiocb
);
1380 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1381 * @job: SEND_MGMT_RESP fc_bsg_job
1384 lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job
*job
)
1386 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
1387 struct lpfc_hba
*phba
= vport
->phba
;
1388 struct send_mgmt_resp
*mgmt_resp
= (struct send_mgmt_resp
*)
1389 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
1390 struct ulp_bde64
*bpl
;
1391 struct lpfc_dmabuf
*bmp
= NULL
;
1392 struct scatterlist
*sgel
= NULL
;
1396 uint32_t tag
= mgmt_resp
->tag
;
1397 unsigned long reqbfrcnt
=
1398 (unsigned long)job
->request_payload
.payload_len
;
1401 /* in case no data is transferred */
1402 job
->reply
->reply_payload_rcv_len
= 0;
1404 if (!reqbfrcnt
|| (reqbfrcnt
> (80 * BUF_SZ_4K
))) {
1406 goto send_mgmt_rsp_exit
;
1409 bmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1412 goto send_mgmt_rsp_exit
;
1415 bmp
->virt
= lpfc_mbuf_alloc(phba
, 0, &bmp
->phys
);
1418 goto send_mgmt_rsp_free_bmp
;
1421 INIT_LIST_HEAD(&bmp
->list
);
1422 bpl
= (struct ulp_bde64
*) bmp
->virt
;
1423 request_nseg
= pci_map_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
1424 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1425 for_each_sg(job
->request_payload
.sg_list
, sgel
, request_nseg
, numbde
) {
1426 busaddr
= sg_dma_address(sgel
);
1427 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
1428 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
1429 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
1430 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
1431 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
1435 rc
= lpfc_issue_ct_rsp(phba
, job
, tag
, bmp
, request_nseg
);
1437 if (rc
== IOCB_SUCCESS
)
1438 return 0; /* done for now */
1440 /* TBD need to handle a timeout */
1441 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
1442 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1444 lpfc_mbuf_free(phba
, bmp
->virt
, bmp
->phys
);
1446 send_mgmt_rsp_free_bmp
:
1449 /* make error code available to userspace */
1450 job
->reply
->result
= rc
;
1451 job
->dd_data
= NULL
;
/**
 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing a port into diagnostic loopback
 * mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete, then the link is brought down. If the link
 * is placed in loopback mode then scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 **/
1468 lpfc_bsg_diag_mode(struct fc_bsg_job
*job
)
1470 struct Scsi_Host
*shost
= job
->shost
;
1471 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
1472 struct lpfc_hba
*phba
= vport
->phba
;
1473 struct diag_mode_set
*loopback_mode
;
1474 struct lpfc_sli
*psli
= &phba
->sli
;
1475 struct lpfc_sli_ring
*pring
= &psli
->ring
[LPFC_FCP_RING
];
1476 uint32_t link_flags
;
1478 struct lpfc_vport
**vports
;
1479 LPFC_MBOXQ_t
*pmboxq
;
1484 /* no data to return just the return code */
1485 job
->reply
->reply_payload_rcv_len
= 0;
1487 if (job
->request_len
<
1488 sizeof(struct fc_bsg_request
) + sizeof(struct diag_mode_set
)) {
1489 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1490 "2738 Received DIAG MODE request below minimum "
1496 loopback_mode
= (struct diag_mode_set
*)
1497 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
1498 link_flags
= loopback_mode
->type
;
1499 timeout
= loopback_mode
->timeout
* 100;
1501 if ((phba
->link_state
== LPFC_HBA_ERROR
) ||
1502 (psli
->sli_flag
& LPFC_BLOCK_MGMT_IO
) ||
1503 (!(psli
->sli_flag
& LPFC_SLI_ACTIVE
))) {
1508 pmboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1514 vports
= lpfc_create_vport_work_array(phba
);
1516 for (i
= 0; i
<= phba
->max_vpi
&& vports
[i
] != NULL
; i
++) {
1517 shost
= lpfc_shost_from_vport(vports
[i
]);
1518 scsi_block_requests(shost
);
1521 lpfc_destroy_vport_work_array(phba
, vports
);
1523 shost
= lpfc_shost_from_vport(phba
->pport
);
1524 scsi_block_requests(shost
);
1527 while (pring
->txcmplq_cnt
) {
1528 if (i
++ > 500) /* wait up to 5 seconds */
1534 memset((void *)pmboxq
, 0, sizeof(LPFC_MBOXQ_t
));
1535 pmboxq
->u
.mb
.mbxCommand
= MBX_DOWN_LINK
;
1536 pmboxq
->u
.mb
.mbxOwner
= OWN_HOST
;
1538 mbxstatus
= lpfc_sli_issue_mbox_wait(phba
, pmboxq
, LPFC_MBOX_TMO
);
1540 if ((mbxstatus
== MBX_SUCCESS
) && (pmboxq
->u
.mb
.mbxStatus
== 0)) {
1541 /* wait for link down before proceeding */
1543 while (phba
->link_state
!= LPFC_LINK_DOWN
) {
1544 if (i
++ > timeout
) {
1546 goto loopback_mode_exit
;
1552 memset((void *)pmboxq
, 0, sizeof(LPFC_MBOXQ_t
));
1553 if (link_flags
== INTERNAL_LOOP_BACK
)
1554 pmboxq
->u
.mb
.un
.varInitLnk
.link_flags
= FLAGS_LOCAL_LB
;
1556 pmboxq
->u
.mb
.un
.varInitLnk
.link_flags
=
1557 FLAGS_TOPOLOGY_MODE_LOOP
;
1559 pmboxq
->u
.mb
.mbxCommand
= MBX_INIT_LINK
;
1560 pmboxq
->u
.mb
.mbxOwner
= OWN_HOST
;
1562 mbxstatus
= lpfc_sli_issue_mbox_wait(phba
, pmboxq
,
1565 if ((mbxstatus
!= MBX_SUCCESS
) || (pmboxq
->u
.mb
.mbxStatus
))
1568 phba
->link_flag
|= LS_LOOPBACK_MODE
;
1569 /* wait for the link attention interrupt */
1573 while (phba
->link_state
!= LPFC_HBA_READY
) {
1574 if (i
++ > timeout
) {
1587 vports
= lpfc_create_vport_work_array(phba
);
1589 for (i
= 0; i
<= phba
->max_vpi
&& vports
[i
] != NULL
; i
++) {
1590 shost
= lpfc_shost_from_vport(vports
[i
]);
1591 scsi_unblock_requests(shost
);
1593 lpfc_destroy_vport_work_array(phba
, vports
);
1595 shost
= lpfc_shost_from_vport(phba
->pport
);
1596 scsi_unblock_requests(shost
);
1600 * Let SLI layer release mboxq if mbox command completed after timeout.
1602 if (mbxstatus
!= MBX_TIMEOUT
)
1603 mempool_free(pmboxq
, phba
->mbox_mem_pool
);
1606 /* make error code available to userspace */
1607 job
->reply
->result
= rc
;
1608 /* complete the job back to userspace if no error */
1615 * lpfcdiag_loop_self_reg - obtains a remote port login id
1616 * @phba: Pointer to HBA context object
1617 * @rpi: Pointer to a remote port login id
1619 * This function obtains a remote port login id so the diag loopback test
1620 * can send and receive its own unsolicited CT command.
1622 static int lpfcdiag_loop_self_reg(struct lpfc_hba
*phba
, uint16_t *rpi
)
1625 struct lpfc_dmabuf
*dmabuff
;
1628 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1632 if (phba
->sli_rev
== LPFC_SLI_REV4
)
1633 *rpi
= lpfc_sli4_alloc_rpi(phba
);
1634 status
= lpfc_reg_rpi(phba
, 0, phba
->pport
->fc_myDID
,
1635 (uint8_t *)&phba
->pport
->fc_sparam
, mbox
, *rpi
);
1637 mempool_free(mbox
, phba
->mbox_mem_pool
);
1638 if (phba
->sli_rev
== LPFC_SLI_REV4
)
1639 lpfc_sli4_free_rpi(phba
, *rpi
);
1643 dmabuff
= (struct lpfc_dmabuf
*) mbox
->context1
;
1644 mbox
->context1
= NULL
;
1645 mbox
->context2
= NULL
;
1646 status
= lpfc_sli_issue_mbox_wait(phba
, mbox
, LPFC_MBOX_TMO
);
1648 if ((status
!= MBX_SUCCESS
) || (mbox
->u
.mb
.mbxStatus
)) {
1649 lpfc_mbuf_free(phba
, dmabuff
->virt
, dmabuff
->phys
);
1651 if (status
!= MBX_TIMEOUT
)
1652 mempool_free(mbox
, phba
->mbox_mem_pool
);
1653 if (phba
->sli_rev
== LPFC_SLI_REV4
)
1654 lpfc_sli4_free_rpi(phba
, *rpi
);
1658 *rpi
= mbox
->u
.mb
.un
.varWords
[0];
1660 lpfc_mbuf_free(phba
, dmabuff
->virt
, dmabuff
->phys
);
1662 mempool_free(mbox
, phba
->mbox_mem_pool
);
1667 * lpfcdiag_loop_self_unreg - unregs from the rpi
1668 * @phba: Pointer to HBA context object
1669 * @rpi: Remote port login id
1671 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
1673 static int lpfcdiag_loop_self_unreg(struct lpfc_hba
*phba
, uint16_t rpi
)
1678 /* Allocate mboxq structure */
1679 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1683 lpfc_unreg_login(phba
, 0, rpi
, mbox
);
1684 status
= lpfc_sli_issue_mbox_wait(phba
, mbox
, LPFC_MBOX_TMO
);
1686 if ((status
!= MBX_SUCCESS
) || (mbox
->u
.mb
.mbxStatus
)) {
1687 if (status
!= MBX_TIMEOUT
)
1688 mempool_free(mbox
, phba
->mbox_mem_pool
);
1691 mempool_free(mbox
, phba
->mbox_mem_pool
);
1692 if (phba
->sli_rev
== LPFC_SLI_REV4
)
1693 lpfc_sli4_free_rpi(phba
, rpi
);
/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * flags are used so the unsolicited response handler is able to process
 * the ct command sent on the same port.
 **/
1709 static int lpfcdiag_loop_get_xri(struct lpfc_hba
*phba
, uint16_t rpi
,
1710 uint16_t *txxri
, uint16_t * rxxri
)
1712 struct lpfc_bsg_event
*evt
;
1713 struct lpfc_iocbq
*cmdiocbq
, *rspiocbq
;
1715 struct lpfc_dmabuf
*dmabuf
;
1716 struct ulp_bde64
*bpl
= NULL
;
1717 struct lpfc_sli_ct_request
*ctreq
= NULL
;
1721 unsigned long flags
;
1725 evt
= lpfc_bsg_event_new(FC_REG_CT_EVENT
, current
->pid
,
1726 SLI_CT_ELX_LOOPBACK
);
1730 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1731 list_add(&evt
->node
, &phba
->ct_ev_waiters
);
1732 lpfc_bsg_event_ref(evt
);
1733 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1735 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
1736 rspiocbq
= lpfc_sli_get_iocbq(phba
);
1738 dmabuf
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1740 dmabuf
->virt
= lpfc_mbuf_alloc(phba
, 0, &dmabuf
->phys
);
1742 INIT_LIST_HEAD(&dmabuf
->list
);
1743 bpl
= (struct ulp_bde64
*) dmabuf
->virt
;
1744 memset(bpl
, 0, sizeof(*bpl
));
1745 ctreq
= (struct lpfc_sli_ct_request
*)(bpl
+ 1);
1747 le32_to_cpu(putPaddrHigh(dmabuf
->phys
+
1750 le32_to_cpu(putPaddrLow(dmabuf
->phys
+
1752 bpl
->tus
.f
.bdeFlags
= 0;
1753 bpl
->tus
.f
.bdeSize
= ELX_LOOPBACK_HEADER_SZ
;
1754 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1758 if (cmdiocbq
== NULL
|| rspiocbq
== NULL
||
1759 dmabuf
== NULL
|| bpl
== NULL
|| ctreq
== NULL
||
1760 dmabuf
->virt
== NULL
) {
1762 goto err_get_xri_exit
;
1765 cmd
= &cmdiocbq
->iocb
;
1766 rsp
= &rspiocbq
->iocb
;
1768 memset(ctreq
, 0, ELX_LOOPBACK_HEADER_SZ
);
1770 ctreq
->RevisionId
.bits
.Revision
= SLI_CT_REVISION
;
1771 ctreq
->RevisionId
.bits
.InId
= 0;
1772 ctreq
->FsType
= SLI_CT_ELX_LOOPBACK
;
1773 ctreq
->FsSubType
= 0;
1774 ctreq
->CommandResponse
.bits
.CmdRsp
= ELX_LOOPBACK_XRI_SETUP
;
1775 ctreq
->CommandResponse
.bits
.Size
= 0;
1778 cmd
->un
.xseq64
.bdl
.addrHigh
= putPaddrHigh(dmabuf
->phys
);
1779 cmd
->un
.xseq64
.bdl
.addrLow
= putPaddrLow(dmabuf
->phys
);
1780 cmd
->un
.xseq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
1781 cmd
->un
.xseq64
.bdl
.bdeSize
= sizeof(*bpl
);
1783 cmd
->un
.xseq64
.w5
.hcsw
.Fctl
= LA
;
1784 cmd
->un
.xseq64
.w5
.hcsw
.Dfctl
= 0;
1785 cmd
->un
.xseq64
.w5
.hcsw
.Rctl
= FC_RCTL_DD_UNSOL_CTL
;
1786 cmd
->un
.xseq64
.w5
.hcsw
.Type
= FC_TYPE_CT
;
1788 cmd
->ulpCommand
= CMD_XMIT_SEQUENCE64_CR
;
1789 cmd
->ulpBdeCount
= 1;
1791 cmd
->ulpClass
= CLASS3
;
1792 cmd
->ulpContext
= rpi
;
1794 cmdiocbq
->iocb_flag
|= LPFC_IO_LIBDFC
;
1795 cmdiocbq
->vport
= phba
->pport
;
1797 iocb_stat
= lpfc_sli_issue_iocb_wait(phba
, LPFC_ELS_RING
, cmdiocbq
,
1799 (phba
->fc_ratov
* 2)
1800 + LPFC_DRVR_TIMEOUT
);
1803 goto err_get_xri_exit
;
1805 *txxri
= rsp
->ulpContext
;
1808 evt
->wait_time_stamp
= jiffies
;
1809 time_left
= wait_event_interruptible_timeout(
1810 evt
->wq
, !list_empty(&evt
->events_to_see
),
1811 ((phba
->fc_ratov
* 2) + LPFC_DRVR_TIMEOUT
) * HZ
);
1812 if (list_empty(&evt
->events_to_see
))
1813 ret_val
= (time_left
) ? -EINTR
: -ETIMEDOUT
;
1815 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1816 list_move(evt
->events_to_see
.prev
, &evt
->events_to_get
);
1817 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1818 *rxxri
= (list_entry(evt
->events_to_get
.prev
,
1819 typeof(struct event_data
),
1825 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1826 lpfc_bsg_event_unref(evt
); /* release ref */
1827 lpfc_bsg_event_unref(evt
); /* delete */
1828 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1832 lpfc_mbuf_free(phba
, dmabuf
->virt
, dmabuf
->phys
);
1836 if (cmdiocbq
&& (iocb_stat
!= IOCB_TIMEDOUT
))
1837 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
1839 lpfc_sli_release_iocbq(phba
, rspiocbq
);
/**
 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
 * @phba: Pointer to HBA context object
 * @bpl: Pointer to 64 bit bde structure
 * @size: Number of bytes to process
 * @nocopydata: Flag to copy user data into the allocated buffer
 *
 * This function allocates page size buffers and populates an lpfc_dmabufext.
 * If allowed, the user data pointed to by indataptr is copied into the kernel
 * memory. The chained list of page size buffers is returned.
 **/
1854 static struct lpfc_dmabufext
*
1855 diag_cmd_data_alloc(struct lpfc_hba
*phba
,
1856 struct ulp_bde64
*bpl
, uint32_t size
,
1859 struct lpfc_dmabufext
*mlist
= NULL
;
1860 struct lpfc_dmabufext
*dmp
;
1861 int cnt
, offset
= 0, i
= 0;
1862 struct pci_dev
*pcidev
;
1864 pcidev
= phba
->pcidev
;
1867 /* We get chunks of 4K */
1868 if (size
> BUF_SZ_4K
)
1873 /* allocate struct lpfc_dmabufext buffer header */
1874 dmp
= kmalloc(sizeof(struct lpfc_dmabufext
), GFP_KERNEL
);
1878 INIT_LIST_HEAD(&dmp
->dma
.list
);
1880 /* Queue it to a linked list */
1882 list_add_tail(&dmp
->dma
.list
, &mlist
->dma
.list
);
1886 /* allocate buffer */
1887 dmp
->dma
.virt
= dma_alloc_coherent(&pcidev
->dev
,
1898 bpl
->tus
.f
.bdeFlags
= 0;
1899 pci_dma_sync_single_for_device(phba
->pcidev
,
1900 dmp
->dma
.phys
, LPFC_BPL_SIZE
, PCI_DMA_TODEVICE
);
1903 memset((uint8_t *)dmp
->dma
.virt
, 0, cnt
);
1904 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
1907 /* build buffer ptr list for IOCB */
1908 bpl
->addrLow
= le32_to_cpu(putPaddrLow(dmp
->dma
.phys
));
1909 bpl
->addrHigh
= le32_to_cpu(putPaddrHigh(dmp
->dma
.phys
));
1910 bpl
->tus
.f
.bdeSize
= (ushort
) cnt
;
1911 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1922 diag_cmd_data_free(phba
, mlist
);
/**
 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to
 * receive an unsolicited CT command.
 **/
1935 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba
*phba
, uint16_t rxxri
,
1938 struct lpfc_sli
*psli
= &phba
->sli
;
1939 struct lpfc_sli_ring
*pring
= &psli
->ring
[LPFC_ELS_RING
];
1940 struct lpfc_iocbq
*cmdiocbq
;
1942 struct list_head head
, *curr
, *next
;
1943 struct lpfc_dmabuf
*rxbmp
;
1944 struct lpfc_dmabuf
*dmp
;
1945 struct lpfc_dmabuf
*mp
[2] = {NULL
, NULL
};
1946 struct ulp_bde64
*rxbpl
= NULL
;
1948 struct lpfc_dmabufext
*rxbuffer
= NULL
;
1953 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
1954 rxbmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1955 if (rxbmp
!= NULL
) {
1956 rxbmp
->virt
= lpfc_mbuf_alloc(phba
, 0, &rxbmp
->phys
);
1958 INIT_LIST_HEAD(&rxbmp
->list
);
1959 rxbpl
= (struct ulp_bde64
*) rxbmp
->virt
;
1960 rxbuffer
= diag_cmd_data_alloc(phba
, rxbpl
, len
, 0);
1964 if (!cmdiocbq
|| !rxbmp
|| !rxbpl
|| !rxbuffer
) {
1966 goto err_post_rxbufs_exit
;
1969 /* Queue buffers for the receive exchange */
1970 num_bde
= (uint32_t)rxbuffer
->flag
;
1971 dmp
= &rxbuffer
->dma
;
1973 cmd
= &cmdiocbq
->iocb
;
1976 INIT_LIST_HEAD(&head
);
1977 list_add_tail(&head
, &dmp
->list
);
1978 list_for_each_safe(curr
, next
, &head
) {
1979 mp
[i
] = list_entry(curr
, struct lpfc_dmabuf
, list
);
1982 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
1983 mp
[i
]->buffer_tag
= lpfc_sli_get_buffer_tag(phba
);
1984 cmd
->un
.quexri64cx
.buff
.bde
.addrHigh
=
1985 putPaddrHigh(mp
[i
]->phys
);
1986 cmd
->un
.quexri64cx
.buff
.bde
.addrLow
=
1987 putPaddrLow(mp
[i
]->phys
);
1988 cmd
->un
.quexri64cx
.buff
.bde
.tus
.f
.bdeSize
=
1989 ((struct lpfc_dmabufext
*)mp
[i
])->size
;
1990 cmd
->un
.quexri64cx
.buff
.buffer_tag
= mp
[i
]->buffer_tag
;
1991 cmd
->ulpCommand
= CMD_QUE_XRI64_CX
;
1994 cmd
->ulpBdeCount
= 1;
1995 cmd
->unsli3
.que_xri64cx_ext_words
.ebde_count
= 0;
1998 cmd
->un
.cont64
[i
].addrHigh
= putPaddrHigh(mp
[i
]->phys
);
1999 cmd
->un
.cont64
[i
].addrLow
= putPaddrLow(mp
[i
]->phys
);
2000 cmd
->un
.cont64
[i
].tus
.f
.bdeSize
=
2001 ((struct lpfc_dmabufext
*)mp
[i
])->size
;
2002 cmd
->ulpBdeCount
= ++i
;
2004 if ((--num_bde
> 0) && (i
< 2))
2007 cmd
->ulpCommand
= CMD_QUE_XRI_BUF64_CX
;
2011 cmd
->ulpClass
= CLASS3
;
2012 cmd
->ulpContext
= rxxri
;
2014 iocb_stat
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, cmdiocbq
,
2016 if (iocb_stat
== IOCB_ERROR
) {
2017 diag_cmd_data_free(phba
,
2018 (struct lpfc_dmabufext
*)mp
[0]);
2020 diag_cmd_data_free(phba
,
2021 (struct lpfc_dmabufext
*)mp
[1]);
2022 dmp
= list_entry(next
, struct lpfc_dmabuf
, list
);
2024 goto err_post_rxbufs_exit
;
2027 lpfc_sli_ringpostbuf_put(phba
, pring
, mp
[0]);
2029 lpfc_sli_ringpostbuf_put(phba
, pring
, mp
[1]);
2033 /* The iocb was freed by lpfc_sli_issue_iocb */
2034 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
2036 dmp
= list_entry(next
, struct lpfc_dmabuf
, list
);
2038 goto err_post_rxbufs_exit
;
2041 cmd
= &cmdiocbq
->iocb
;
2046 err_post_rxbufs_exit
:
2050 lpfc_mbuf_free(phba
, rxbmp
->virt
, rxbmp
->phys
);
2055 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
/**
 * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
 *
 * This function receives a user data buffer to be transmitted and received on
 * the same port, the link must be up and in loopback mode prior
 * 1. A kernel buffer is allocated to copy the user data into.
 * 2. The port registers with "itself".
 * 3. The transmit and receive exchange ids are obtained.
 * 4. The receive exchange id is posted.
 * 5. A new els loopback event is created.
 * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
 *
 * This function is meant to be called n times while the port is in loopback
 * so it is the app's responsibility to issue a reset to take the port out
2079 lpfc_bsg_diag_test(struct fc_bsg_job
*job
)
2081 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
2082 struct lpfc_hba
*phba
= vport
->phba
;
2083 struct diag_mode_test
*diag_mode
;
2084 struct lpfc_bsg_event
*evt
;
2085 struct event_data
*evdat
;
2086 struct lpfc_sli
*psli
= &phba
->sli
;
2089 size_t segment_len
= 0, segment_offset
= 0, current_offset
= 0;
2091 struct lpfc_iocbq
*cmdiocbq
, *rspiocbq
;
2093 struct lpfc_sli_ct_request
*ctreq
;
2094 struct lpfc_dmabuf
*txbmp
;
2095 struct ulp_bde64
*txbpl
= NULL
;
2096 struct lpfc_dmabufext
*txbuffer
= NULL
;
2097 struct list_head head
;
2098 struct lpfc_dmabuf
*curr
;
2099 uint16_t txxri
, rxxri
;
2101 uint8_t *ptr
= NULL
, *rx_databuf
= NULL
;
2105 unsigned long flags
;
2106 void *dataout
= NULL
;
2109 /* in case no data is returned return just the return code */
2110 job
->reply
->reply_payload_rcv_len
= 0;
2112 if (job
->request_len
<
2113 sizeof(struct fc_bsg_request
) + sizeof(struct diag_mode_test
)) {
2114 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
2115 "2739 Received DIAG TEST request below minimum "
2118 goto loopback_test_exit
;
2121 if (job
->request_payload
.payload_len
!=
2122 job
->reply_payload
.payload_len
) {
2124 goto loopback_test_exit
;
2127 diag_mode
= (struct diag_mode_test
*)
2128 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
2130 if ((phba
->link_state
== LPFC_HBA_ERROR
) ||
2131 (psli
->sli_flag
& LPFC_BLOCK_MGMT_IO
) ||
2132 (!(psli
->sli_flag
& LPFC_SLI_ACTIVE
))) {
2134 goto loopback_test_exit
;
2137 if (!lpfc_is_link_up(phba
) || !(phba
->link_flag
& LS_LOOPBACK_MODE
)) {
2139 goto loopback_test_exit
;
2142 size
= job
->request_payload
.payload_len
;
2143 full_size
= size
+ ELX_LOOPBACK_HEADER_SZ
; /* plus the header */
2145 if ((size
== 0) || (size
> 80 * BUF_SZ_4K
)) {
2147 goto loopback_test_exit
;
2150 if (full_size
>= BUF_SZ_4K
) {
2152 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2153 * then we allocate 64k and re-use that buffer over and over to
2154 * xfer the whole block. This is because Linux kernel has a
2155 * problem allocating more than 120k of kernel space memory. Saw
2156 * problem with GET_FCPTARGETMAPPING...
2158 if (size
<= (64 * 1024))
2159 total_mem
= full_size
;
2161 total_mem
= 64 * 1024;
2163 /* Allocate memory for ioctl data */
2164 total_mem
= BUF_SZ_4K
;
2166 dataout
= kmalloc(total_mem
, GFP_KERNEL
);
2167 if (dataout
== NULL
) {
2169 goto loopback_test_exit
;
2173 ptr
+= ELX_LOOPBACK_HEADER_SZ
;
2174 sg_copy_to_buffer(job
->request_payload
.sg_list
,
2175 job
->request_payload
.sg_cnt
,
2177 rc
= lpfcdiag_loop_self_reg(phba
, &rpi
);
2179 goto loopback_test_exit
;
2181 rc
= lpfcdiag_loop_get_xri(phba
, rpi
, &txxri
, &rxxri
);
2183 lpfcdiag_loop_self_unreg(phba
, rpi
);
2184 goto loopback_test_exit
;
2187 rc
= lpfcdiag_loop_post_rxbufs(phba
, rxxri
, full_size
);
2189 lpfcdiag_loop_self_unreg(phba
, rpi
);
2190 goto loopback_test_exit
;
2193 evt
= lpfc_bsg_event_new(FC_REG_CT_EVENT
, current
->pid
,
2194 SLI_CT_ELX_LOOPBACK
);
2196 lpfcdiag_loop_self_unreg(phba
, rpi
);
2198 goto loopback_test_exit
;
2201 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
2202 list_add(&evt
->node
, &phba
->ct_ev_waiters
);
2203 lpfc_bsg_event_ref(evt
);
2204 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
2206 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
2207 rspiocbq
= lpfc_sli_get_iocbq(phba
);
2208 txbmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
2211 txbmp
->virt
= lpfc_mbuf_alloc(phba
, 0, &txbmp
->phys
);
2213 INIT_LIST_HEAD(&txbmp
->list
);
2214 txbpl
= (struct ulp_bde64
*) txbmp
->virt
;
2215 txbuffer
= diag_cmd_data_alloc(phba
,
2216 txbpl
, full_size
, 0);
2220 if (!cmdiocbq
|| !rspiocbq
|| !txbmp
|| !txbpl
|| !txbuffer
||
2223 goto err_loopback_test_exit
;
2226 cmd
= &cmdiocbq
->iocb
;
2227 rsp
= &rspiocbq
->iocb
;
2229 INIT_LIST_HEAD(&head
);
2230 list_add_tail(&head
, &txbuffer
->dma
.list
);
2231 list_for_each_entry(curr
, &head
, list
) {
2232 segment_len
= ((struct lpfc_dmabufext
*)curr
)->size
;
2233 if (current_offset
== 0) {
2235 memset(ctreq
, 0, ELX_LOOPBACK_HEADER_SZ
);
2236 ctreq
->RevisionId
.bits
.Revision
= SLI_CT_REVISION
;
2237 ctreq
->RevisionId
.bits
.InId
= 0;
2238 ctreq
->FsType
= SLI_CT_ELX_LOOPBACK
;
2239 ctreq
->FsSubType
= 0;
2240 ctreq
->CommandResponse
.bits
.CmdRsp
= ELX_LOOPBACK_DATA
;
2241 ctreq
->CommandResponse
.bits
.Size
= size
;
2242 segment_offset
= ELX_LOOPBACK_HEADER_SZ
;
2246 BUG_ON(segment_offset
>= segment_len
);
2247 memcpy(curr
->virt
+ segment_offset
,
2248 ptr
+ current_offset
,
2249 segment_len
- segment_offset
);
2251 current_offset
+= segment_len
- segment_offset
;
2252 BUG_ON(current_offset
> size
);
2256 /* Build the XMIT_SEQUENCE iocb */
2258 num_bde
= (uint32_t)txbuffer
->flag
;
2260 cmd
->un
.xseq64
.bdl
.addrHigh
= putPaddrHigh(txbmp
->phys
);
2261 cmd
->un
.xseq64
.bdl
.addrLow
= putPaddrLow(txbmp
->phys
);
2262 cmd
->un
.xseq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
2263 cmd
->un
.xseq64
.bdl
.bdeSize
= (num_bde
* sizeof(struct ulp_bde64
));
2265 cmd
->un
.xseq64
.w5
.hcsw
.Fctl
= (LS
| LA
);
2266 cmd
->un
.xseq64
.w5
.hcsw
.Dfctl
= 0;
2267 cmd
->un
.xseq64
.w5
.hcsw
.Rctl
= FC_RCTL_DD_UNSOL_CTL
;
2268 cmd
->un
.xseq64
.w5
.hcsw
.Type
= FC_TYPE_CT
;
2270 cmd
->ulpCommand
= CMD_XMIT_SEQUENCE64_CX
;
2271 cmd
->ulpBdeCount
= 1;
2273 cmd
->ulpClass
= CLASS3
;
2274 cmd
->ulpContext
= txxri
;
2276 cmdiocbq
->iocb_flag
|= LPFC_IO_LIBDFC
;
2277 cmdiocbq
->vport
= phba
->pport
;
2279 iocb_stat
= lpfc_sli_issue_iocb_wait(phba
, LPFC_ELS_RING
, cmdiocbq
,
2280 rspiocbq
, (phba
->fc_ratov
* 2) +
2283 if ((iocb_stat
!= IOCB_SUCCESS
) || (rsp
->ulpStatus
!= IOCB_SUCCESS
)) {
2285 goto err_loopback_test_exit
;
2289 time_left
= wait_event_interruptible_timeout(
2290 evt
->wq
, !list_empty(&evt
->events_to_see
),
2291 ((phba
->fc_ratov
* 2) + LPFC_DRVR_TIMEOUT
) * HZ
);
2293 if (list_empty(&evt
->events_to_see
))
2294 rc
= (time_left
) ? -EINTR
: -ETIMEDOUT
;
2296 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
2297 list_move(evt
->events_to_see
.prev
, &evt
->events_to_get
);
2298 evdat
= list_entry(evt
->events_to_get
.prev
,
2299 typeof(*evdat
), node
);
2300 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
2301 rx_databuf
= evdat
->data
;
2302 if (evdat
->len
!= full_size
) {
2303 lpfc_printf_log(phba
, KERN_ERR
, LOG_LIBDFC
,
2304 "1603 Loopback test did not receive expected "
2305 "data length. actual length 0x%x expected "
2307 evdat
->len
, full_size
);
2309 } else if (rx_databuf
== NULL
)
2313 /* skip over elx loopback header */
2314 rx_databuf
+= ELX_LOOPBACK_HEADER_SZ
;
2315 job
->reply
->reply_payload_rcv_len
=
2316 sg_copy_from_buffer(job
->reply_payload
.sg_list
,
2317 job
->reply_payload
.sg_cnt
,
2319 job
->reply
->reply_payload_rcv_len
= size
;
2323 err_loopback_test_exit
:
2324 lpfcdiag_loop_self_unreg(phba
, rpi
);
2326 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
2327 lpfc_bsg_event_unref(evt
); /* release ref */
2328 lpfc_bsg_event_unref(evt
); /* delete */
2329 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
2331 if (cmdiocbq
!= NULL
)
2332 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
2334 if (rspiocbq
!= NULL
)
2335 lpfc_sli_release_iocbq(phba
, rspiocbq
);
2337 if (txbmp
!= NULL
) {
2338 if (txbpl
!= NULL
) {
2339 if (txbuffer
!= NULL
)
2340 diag_cmd_data_free(phba
, txbuffer
);
2341 lpfc_mbuf_free(phba
, txbmp
->virt
, txbmp
->phys
);
2348 /* make error code available to userspace */
2349 job
->reply
->result
= rc
;
2350 job
->dd_data
= NULL
;
2351 /* complete the job back to userspace if no error */
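/*
 * Note on the loopback data path (a summary of the flow above, not new
 * behaviour): the user payload is prefixed with an ELX_LOOPBACK_HEADER_SZ
 * CT header, transmitted on the txxri exchange with XMIT_SEQUENCE64_CX,
 * received back on the buffers posted against rxxri as an unsolicited CT
 * event, and finally copied to the reply scatter-gather list with the
 * loopback header stripped off.
 */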
/**
 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
 * @job: GET_DFC_REV fc_bsg_job
 **/
static int
lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_mgmt_rev *event_req;
	struct get_mgmt_rev_reply *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2740 Received GET_DFC_REV request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_mgmt_rev *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_mgmt_rev_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2741 Received GET_DFC_REV reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;

job_error:
	job->reply->result = rc;
	if (rc == 0)
		job->job_done(job);
	return rc;
}
/**
 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_bsg_issue_mbox function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by context1
 * of the mailbox.
 **/
void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	uint32_t size;
	unsigned long flags;
	uint8_t *to;
	uint8_t *from;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = pmboxq->context1;
	/* job already timed out? */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	/* build the outgoing buffer to do an sg copy
	 * the format is the response mailbox followed by any extended
	 * mailbox data
	 */
	from = (uint8_t *)&pmboxq->u.mb;
	to = (uint8_t *)dd_data->context_un.mbox.mb;
	memcpy(to, from, sizeof(MAILBOX_t));
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
		/* copy the extended data if any, count is in words */
		if (dd_data->context_un.mbox.outExtWLen) {
			from = (uint8_t *)dd_data->context_un.mbox.ext;
			to += sizeof(MAILBOX_t);
			size = dd_data->context_un.mbox.outExtWLen *
					sizeof(uint32_t);
			memcpy(to, from, size);
		} else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
			from = (uint8_t *)dd_data->context_un.mbox.
						dmp->dma.virt;
			to += sizeof(MAILBOX_t);
			size = dd_data->context_un.mbox.dmp->size;
			memcpy(to, from, size);
		} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
			(pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
			from = (uint8_t *)dd_data->context_un.mbox.
						dmp->dma.virt;
			to += sizeof(MAILBOX_t);
			size = pmboxq->u.mb.un.varWords[5];
			memcpy(to, from, size);
		} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
			(pmboxq->u.mb.mbxCommand == MBX_SLI4_CONFIG)) {
			struct lpfc_mbx_nembed_cmd *nembed_sge =
				(struct lpfc_mbx_nembed_cmd *)
					&pmboxq->u.mb.un.varWords[0];

			from = (uint8_t *)dd_data->context_un.mbox.
						dmp->dma.virt;
			to += sizeof(MAILBOX_t);
			size = nembed_sge->sge[0].length;
			memcpy(to, from, size);
		} else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
			from = (uint8_t *)dd_data->context_un.
						mbox.dmp->dma.virt;
			to += sizeof(MAILBOX_t);
			size = dd_data->context_un.mbox.dmp->size;
			memcpy(to, from, size);
		}
	}

	from = (uint8_t *)dd_data->context_un.mbox.mb;
	job = dd_data->context_un.mbox.set_job;
	size = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    from, size);
	job->reply->result = 0;

	dd_data->context_un.mbox.set_job = NULL;
	job->dd_data = NULL;

	/* need to hold the lock until we call job done to hold off
	 * the timeout handler returning to the midlayer while
	 * we are still processing the job
	 */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	kfree(dd_data->context_un.mbox.mb);
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	kfree(dd_data->context_un.mbox.ext);
	if (dd_data->context_un.mbox.dmp) {
		dma_free_coherent(&phba->pcidev->dev,
			dd_data->context_un.mbox.dmp->size,
			dd_data->context_un.mbox.dmp->dma.virt,
			dd_data->context_un.mbox.dmp->dma.phys);
		kfree(dd_data->context_un.mbox.dmp);
	}
	if (dd_data->context_un.mbox.rxbmp) {
		lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
			dd_data->context_un.mbox.rxbmp->phys);
		kfree(dd_data->context_un.mbox.rxbmp);
	}
	kfree(dd_data);
	return;
}
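/*
 * Reply buffer layout produced above for the BSG mailbox path: the first
 * sizeof(MAILBOX_t) bytes are the completed mailbox image, followed (on
 * MBX_SUCCESS) by any extended mailbox data or DMA buffer contents, and the
 * whole block is then copied back into the job's reply scatter-gather list.
 */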
/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_DUMP_CONTEXT:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
		break;
	case MBX_WRITE_VPARMS:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_LOAD_EXP_ROM:
	case MBX_DEL_LD_ENTRY:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1226 mbox: set_variable 0x%x, 0x%x\n",
			mb->un.varWords[0],
			mb->un.varWords[1]);
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
			&& (mb->un.varWords[1] == 1)) {
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
		}
		break;
	case MBX_READ_SPARM64:
	case MBX_READ_TOPOLOGY:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}
/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the fc_bsg_job carrying the mailbox vendor command.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object, mailbox command memory, get a mailbox
 * from the mailbox pool, copy the caller mailbox command.
 *
 * If offline and the sli is active we need to poll for the command (port is
 * being reset) and complete the job, otherwise issue the mailbox command and
 * let our completion handler finish the command.
 **/
static int
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
	struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
	/* a 4k buffer to hold the mb and extended data from/to the bsg */
	MAILBOX_t *mb = NULL;
	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
	uint32_t size;
	struct lpfc_dmabuf *rxbmp = NULL; /* for biu diags */
	struct lpfc_dmabufext *dmp = NULL; /* for biu diags */
	struct ulp_bde64 *rxbpl = NULL;
	struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	uint8_t *ext = NULL;
	uint8_t *from;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* check if requested extended data lengths are valid */
	if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
	    (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
		rc = -ERANGE;
		goto job_done;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_done;
	}

	mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
	if (!mb) {
		rc = -ENOMEM;
		goto job_done;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_done;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  mb, size);

	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
	if (rc != 0)
		goto job_done; /* must be negative */

	pmb = &pmboxq->u.mb;
	memcpy(pmb, mb, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = vport;

	/* If HBA encountered an error attention, allow only DUMP
	 * or RESTART mailbox commands until the HBA is restarted.
	 */
	if (phba->pport->stopped &&
	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
	    pmb->mbxCommand != MBX_RESTART &&
	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
	    pmb->mbxCommand != MBX_WRITE_WWN)
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2797 mbox: Issued mailbox cmd "
				"0x%x while in stopped state.\n",
				pmb->mbxCommand);

	/* Don't allow mailbox commands to be sent when blocked
	 * or when in the middle of discovery
	 */
	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_done;
	}

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
		ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
		if (!ext) {
			rc = -ENOMEM;
			goto job_done;
		}

		/* any data for the device? */
		if (mbox_req->inExtWLen) {
			from = (uint8_t *)mb;
			from += sizeof(MAILBOX_t);
			memcpy((uint8_t *)ext, from,
				mbox_req->inExtWLen * sizeof(uint32_t));
		}

		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outExtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}

	/* biu diag will need a kernel buffer to transfer the data
	 * allocate our own buffer and setup the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		uint32_t transmit_length = pmb->un.varWords[1];
		uint32_t receive_length = pmb->un.varWords[4];
		/* transmit length cannot be greater than receive length or
		 * mailbox extension size
		 */
		if ((transmit_length > receive_length) ||
		    (transmit_length > MAILBOX_EXT_SIZE)) {
			rc = -ERANGE;
			goto job_done;
		}

		rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!rxbmp) {
			rc = -ENOMEM;
			goto job_done;
		}

		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		if (!rxbmp->virt) {
			rc = -ENOMEM;
			goto job_done;
		}

		INIT_LIST_HEAD(&rxbmp->list);
		rxbpl = (struct ulp_bde64 *) rxbmp->virt;
		dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
		if (!dmp) {
			rc = -ENOMEM;
			goto job_done;
		}

		INIT_LIST_HEAD(&dmp->dma.list);
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmp->dma.phys);
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmp->dma.phys);

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmp->dma.phys +
				pmb->un.varBIUdiag.un.s2.
					xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmp->dma.phys +
				pmb->un.varBIUdiag.un.s2.
					xmit_bde64.tus.f.bdeSize);

		/* copy the transmit data found in the mailbox extension area */
		from = (uint8_t *)mb;
		from += sizeof(MAILBOX_t);
		memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
		struct READ_EVENT_LOG_VAR *rdEventLog =
			&pmb->un.varRdEventLog;
		uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
		uint32_t mode = bf_get(lpfc_event_log, rdEventLog);

		/* receive length cannot be greater than mailbox
		 * extension size
		 */
		if (receive_length > MAILBOX_EXT_SIZE) {
			rc = -ERANGE;
			goto job_done;
		}

		/* mode zero uses a bde like biu diags command */
		if (mode == 0) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			if (!rxbmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			if (!rxbpl) {
				rc = -ENOMEM;
				goto job_done;
			}
			INIT_LIST_HEAD(&rxbmp->list);
			dmp = diag_cmd_data_alloc(phba, rxbpl,
						  receive_length, 0);
			if (!dmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&dmp->dma.list);
			pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
			pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
		}
	} else if (phba->sli_rev == LPFC_SLI_REV4) {
		if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			uint32_t receive_length = pmb->un.varWords[2];
			/* receive length cannot be greater than mailbox
			 * extension size
			 */
			if ((receive_length == 0) ||
			    (receive_length > MAILBOX_EXT_SIZE)) {
				rc = -ERANGE;
				goto job_done;
			}

			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			if (!rxbmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			if (!rxbmp->virt) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			dmp = diag_cmd_data_alloc(phba, rxbpl,
						  receive_length, 0);
			if (!dmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&dmp->dma.list);
			pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
			pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
			   pmb->un.varUpdateCfg.co) {
			struct ulp_bde64 *bde =
				(struct ulp_bde64 *)&pmb->un.varWords[4];

			/* bde size cannot be greater than mailbox ext size */
			if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
				rc = -ERANGE;
				goto job_done;
			}

			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			if (!rxbmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			if (!rxbmp->virt) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			dmp = diag_cmd_data_alloc(phba, rxbpl,
						  bde->tus.f.bdeSize, 0);
			if (!dmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&dmp->dma.list);
			bde->addrHigh = putPaddrHigh(dmp->dma.phys);
			bde->addrLow = putPaddrLow(dmp->dma.phys);

			/* copy the transmit data found in the mailbox
			 * extension area
			 */
			from = (uint8_t *)mb;
			from += sizeof(MAILBOX_t);
			memcpy((uint8_t *)dmp->dma.virt, from,
				bde->tus.f.bdeSize);
		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
			struct lpfc_mbx_nembed_cmd *nembed_sge;
			struct mbox_header *header;
			uint32_t receive_length;

			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
			header = (struct mbox_header *)&pmb->un.varWords[0];
			nembed_sge = (struct lpfc_mbx_nembed_cmd *)
				&pmb->un.varWords[0];
			receive_length = nembed_sge->sge[0].length;

			/* receive length cannot be greater than mailbox
			 * extension size
			 */
			if ((receive_length == 0) ||
			    (receive_length > MAILBOX_EXT_SIZE)) {
				rc = -ERANGE;
				goto job_done;
			}

			rxbmp = kmalloc(sizeof(struct lpfc_dmabuf),
					GFP_KERNEL);
			if (!rxbmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
			if (!rxbmp->virt) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&rxbmp->list);
			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
			dmp = diag_cmd_data_alloc(phba, rxbpl,
						  receive_length, 0);
			if (!dmp) {
				rc = -ENOMEM;
				goto job_done;
			}

			INIT_LIST_HEAD(&dmp->dma.list);
			nembed_sge->sge[0].pa_hi = putPaddrHigh(dmp->dma.phys);
			nembed_sge->sge[0].pa_lo = putPaddrLow(dmp->dma.phys);
			/* copy the transmit data found in the mailbox
			 * extension area
			 */
			from = (uint8_t *)mb;
			from += sizeof(MAILBOX_t);
			memcpy((uint8_t *)dmp->dma.virt, from,
				header->cfg_mhdr.payload_length);
		}
	}

	dd_data->context_un.mbox.rxbmp = rxbmp;
	dd_data->context_un.mbox.dmp = dmp;

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;

	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = mb;
	dd_data->context_un.mbox.set_job = job;
	dd_data->context_un.mbox.ext = ext;
	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
	job->dd_data = dd_data;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
			goto job_done;
		}

		/* job finished, copy the data */
		memcpy(mb, pmb, sizeof(*pmb));
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    mb, size);
		/* not waiting, mbox already done */
		rc = 0;
		goto job_done;
	}

	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
		return 1; /* job started */

job_done:
	/* common exit for error or job completed inline */
	kfree(mb);
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(ext);
	if (dmp) {
		dma_free_coherent(&phba->pcidev->dev,
			dmp->size, dmp->dma.virt,
			dmp->dma.phys);
		kfree(dmp);
	}
	if (rxbmp) {
		lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}
	kfree(dd_data);

	return rc;
}
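/*
 * Resource ownership for the mailbox path: when MBX_NOWAIT submission
 * succeeds (return value 1), the tracking structure, mailbox buffer,
 * extension buffer and any DMA buffers are released by
 * lpfc_bsg_wake_mbox_wait(); on error or inline (polled) completion they
 * are released in the common exit path above.
 */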
/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
 **/
static int
lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2737 Received MBOX_REQ request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
		rc = -EINVAL;
		goto job_error;
	}

	if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
		rc = -EINVAL;
		goto job_error;
	}

	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_error;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);

	if (rc == 0) {
		/* job done */
		job->reply->result = 0;
		job->dd_data = NULL;
		job->job_done(job);
	} else if (rc == 1)
		/* job submitted, will complete later */
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		job->reply->result = rc;
		job->dd_data = NULL;
	}

job_error:
	return rc;
}
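/*
 * Userspace contract for LPFC_BSG_VENDOR_MBOX (as enforced above): both the
 * request and reply payloads must be exactly BSG_MBOX_SIZE bytes, with the
 * MAILBOX_t image at the start of the request buffer and any extended
 * mailbox data following it.  How an application builds the surrounding
 * fc_bsg_request is outside this file; this note only restates the checks
 * made by lpfc_bsg_mbox_cmd() and lpfc_bsg_issue_mbox().
 */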
/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_menlo_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	menlo = &dd_data->context_un.menlo;
	job = menlo->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock(&phba->hbalock);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock(&phba->hbalock);

	bmp = menlo->bmp;
	rspiocbq = menlo->rspiocbq;
	rsp = &rspiocbq->iocb;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	/* always return the xri, this would be used in the case
	 * of a menlo download to allow the data to be sent as a continuation
	 * of the exchange.
	 */
	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	menlo_resp->xri = rsp->ulpContext;
	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
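/*
 * The xri returned in menlo_resp above allows a firmware download to be
 * split across several MENLO_DATA requests: each follow-on request supplies
 * that xri in its menlo_command header and lpfc_menlo_cmd() continues the
 * same exchange with a GEN_REQUEST64_CX iocb.
 */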
/**
 * lpfc_menlo_cmd - send an ioctl for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the exchange
 * supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct menlo_response *menlo_resp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned, return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
		sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* Limited Edition */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context2 = rspiocbq;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rspiocbq = rspiocbq;
	dd_data->context_un.menlo.set_job = job;
	dd_data->context_un.menlo.bmp = bmp;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
				 MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_TEST:
		rc = lpfc_bsg_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
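/*
 * lpfc_bsg_request() is the single entry point the FC transport calls for
 * every bsg request on this host: vendor-specific commands are fanned out
 * by lpfc_bsg_hst_vendor(), ELS and CT pass-through go to
 * lpfc_bsg_rport_els() and lpfc_bsg_send_mgmt_cmd(), and anything else is
 * failed with -EINVAL.
 */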
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB. The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace.
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_bsg_menlo *menlo;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signalling the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		job->reply->result = -EAGAIN;
		/* the mbox completion handler can now be run */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MENLO:
		menlo = &dd_data->context_un.menlo;
		cmdiocb = menlo->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return 0;
}