/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* job waiting for this event to finish */
	struct fc_bsg_job *set_job;
};
struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};
struct lpfc_bsg_mbox {
	/* job waiting for this mbox command to finish */
	struct fc_bsg_job *set_job;
};

#define MENLO_DID 0x0000FC0E
struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};
struct bsg_job_data {
	uint32_t type;
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};
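/*
 * One bsg_job_data is allocated per outstanding request.  It is reachable
 * both from job->dd_data and from the command iocb's context1, so whichever
 * path finishes first (the completion handler or the fc transport timeout
 * handler) can complete the job and clear job->dd_data to tell the other
 * path that the job has already been completed.
 */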
#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};
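/*
 * ELX_LOOPBACK_HEADER_SZ evaluates to the offset of the 'un' payload member
 * inside struct lpfc_sli_ct_request (the classic offsetof() idiom), i.e. the
 * number of CT preamble bytes that precede the loopback payload.
 */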
/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	unsigned long iflags;
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	iocb = &dd_data->context_un.iocb;
	job = iocb->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	bmp = iocb->bmp;
	ndlp = iocb->ndlp;
	rspiocbq = iocb->rspiocbq;
	rsp = &rspiocbq->iocb;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_bmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	cmd = &cmdiocbq->iocb;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
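/*
 * Note on the asynchronous completion model used above: when
 * lpfc_sli_issue_iocb() returns IOCB_SUCCESS the request stays outstanding
 * and the function returns 0 without calling job->job_done(); the job is
 * completed later either by the iocb completion handler or by the fc
 * transport timeout handler, whichever runs first.
 */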
/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	job = dd_data->context_un.iocb.set_job;
	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
	rspiocbq = dd_data->context_un.iocb.rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (job->reply->result == -EAGAIN)
		rc = -EAGAIN;
	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
		job->reply->reply_payload_rcv_len =
			rsp->un.elsreq64.bdl.bdeSize;
	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
		job->reply->reply_payload_rcv_len =
			sizeof(struct fc_bsg_ctels_reply);
		/* LS_RJT data returned in word 4 */
		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
		els_reply = &job->reply->reply_data.ctels_reply;
		els_reply->status = FC_CTELS_STATUS_REJECT;
		els_reply->rjt_data.action = rjt_data[3];
		els_reply->rjt_data.reason_code = rjt_data[2];
		els_reply->rjt_data.reason_explanation = rjt_data[1];
		els_reply->rjt_data.vendor_unique = rjt_data[0];
	} else
		rc = -EIO;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *rsp;
	uint16_t rpi = 0;
	struct lpfc_dmabuf *pcmd;
	struct lpfc_dmabuf *prsp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct ulp_bde64 *bpl;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		lpfc_nlp_put(ndlp);
		rc = -ENOMEM;
		goto free_dd_data;
	}

	rsp = &rspiocbq->iocb;
	rpi = ndlp->nlp_rpi;

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto free_rspiocbq;
	}

	/* prep els iocb set context1 to the ndlp, context2 to the command
	 * dmabuf, context3 holds the data dmabuf
	 */
	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(prsp);
	cmdiocbq->context2 = NULL;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	bpl = (struct ulp_bde64 *) pbuflist->virt;

	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;

	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = NULL;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	lpfc_nlp_put(ndlp);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt);
}
/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}
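/*
 * Reference counting convention: the phba->ct_ev_waiters list holds one
 * reference to each event, and every waiter or unsolicited-event path takes
 * its own reference with lpfc_bsg_event_ref() while it works on the event.
 * Dropping the final reference through lpfc_bsg_event_unref() runs
 * lpfc_bsg_event_free(), which unlinks the event and frees any queued data.
 */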
/**
 * lpfc_bsg_event_new - allocate and initialize a event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}
/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @piocbq: Pointer to the unsolicited command iocbq.
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct fc_bsg_job *job = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
		    evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						diag_cmd_data_free(phba,
						(struct lpfc_dmabufext *)
							dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
						    (phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							))
							lpfc_in_buf_free(phba,
									dmabuf);
						else
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
					piocbq->iocb.un.rcvels.remoteID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);
		lpfc_bsg_event_unref(evt);

		job = evt->set_job;
		evt->set_job = NULL;
		if (job) {
			job->reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			job->reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			job->job_done(job);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if (evt_req_id == SLI_CT_ELX_LOOPBACK)
		return 0;
	return 1;
}
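/*
 * For SLI-4 ports the handler above parks the exchange id (oxid) and source
 * ID of the unsolicited CT command in phba->ct_ctx[] and hands the array
 * index to the application in immed_dat; lpfc_issue_ct_rsp() later uses that
 * tag to address the response to the correct exchange.
 */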
/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (dd_data == NULL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2734 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data->type = TYPE_EVT;
	dd_data->context_un.evt = evt;
	evt->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't anymore
	 */
	if (evt_dat == NULL) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;

	kfree(evt_dat->data);
	kfree(evt_dat);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	job->reply->result = 0;
	job->job_done(job);
	return 0;

job_error:
	job->dd_data = NULL;
	job->reply->result = rc;
	return rc;
}
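/*
 * Typical flow for the two vendor commands above: SET_EVENT registers a
 * waiter and returns without completing the bsg job; unsolicited CT events
 * are then queued on events_to_get, and the application issues GET_EVENT
 * repeatedly to drain them until the driver reports that no more event data
 * is available.
 */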
/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	job = dd_data->context_un.iocb.set_job;
	bmp = dd_data->context_un.iocb.bmp;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
1245 * lpfc_issue_ct_rsp - issue a ct response
1246 * @phba: Pointer to HBA context object.
1247 * @job: Pointer to the job object.
1248 * @tag: tag index value into the ports context exchange array.
1249 * @bmp: Pointer to a dma buffer descriptor.
1250 * @num_entry: Number of enties in the bde.
1253 lpfc_issue_ct_rsp(struct lpfc_hba
*phba
, struct fc_bsg_job
*job
, uint32_t tag
,
1254 struct lpfc_dmabuf
*bmp
, int num_entry
)
1257 struct lpfc_iocbq
*ctiocb
= NULL
;
1259 struct lpfc_nodelist
*ndlp
= NULL
;
1260 struct bsg_job_data
*dd_data
;
1263 /* allocate our bsg tracking structure */
1264 dd_data
= kmalloc(sizeof(struct bsg_job_data
), GFP_KERNEL
);
1266 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1267 "2736 Failed allocation of dd_data\n");
1272 /* Allocate buffer for command iocb */
1273 ctiocb
= lpfc_sli_get_iocbq(phba
);
1279 icmd
= &ctiocb
->iocb
;
1280 icmd
->un
.xseq64
.bdl
.ulpIoTag32
= 0;
1281 icmd
->un
.xseq64
.bdl
.addrHigh
= putPaddrHigh(bmp
->phys
);
1282 icmd
->un
.xseq64
.bdl
.addrLow
= putPaddrLow(bmp
->phys
);
1283 icmd
->un
.xseq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
1284 icmd
->un
.xseq64
.bdl
.bdeSize
= (num_entry
* sizeof(struct ulp_bde64
));
1285 icmd
->un
.xseq64
.w5
.hcsw
.Fctl
= (LS
| LA
);
1286 icmd
->un
.xseq64
.w5
.hcsw
.Dfctl
= 0;
1287 icmd
->un
.xseq64
.w5
.hcsw
.Rctl
= FC_RCTL_DD_SOL_CTL
;
1288 icmd
->un
.xseq64
.w5
.hcsw
.Type
= FC_TYPE_CT
;
1290 /* Fill in rest of iocb */
1291 icmd
->ulpCommand
= CMD_XMIT_SEQUENCE64_CX
;
1292 icmd
->ulpBdeCount
= 1;
1294 icmd
->ulpClass
= CLASS3
;
1295 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
1296 /* Do not issue unsol response if oxid not marked as valid */
1297 if (!(phba
->ct_ctx
[tag
].flags
& UNSOL_VALID
)) {
1299 goto issue_ct_rsp_exit
;
1301 icmd
->ulpContext
= phba
->ct_ctx
[tag
].oxid
;
1302 ndlp
= lpfc_findnode_did(phba
->pport
, phba
->ct_ctx
[tag
].SID
);
1304 lpfc_printf_log(phba
, KERN_WARNING
, LOG_ELS
,
1305 "2721 ndlp null for oxid %x SID %x\n",
1307 phba
->ct_ctx
[tag
].SID
);
1309 goto issue_ct_rsp_exit
;
1311 icmd
->un
.ulpWord
[3] = ndlp
->nlp_rpi
;
1312 /* The exchange is done, mark the entry as invalid */
1313 phba
->ct_ctx
[tag
].flags
&= ~UNSOL_VALID
;
1315 icmd
->ulpContext
= (ushort
) tag
;
1317 icmd
->ulpTimeout
= phba
->fc_ratov
* 2;
1319 /* Xmit CT response on exchange <xid> */
1320 lpfc_printf_log(phba
, KERN_INFO
, LOG_ELS
,
1321 "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
1322 icmd
->ulpContext
, icmd
->ulpIoTag
, phba
->link_state
);
1324 ctiocb
->iocb_cmpl
= NULL
;
1325 ctiocb
->iocb_flag
|= LPFC_IO_LIBDFC
;
1326 ctiocb
->vport
= phba
->pport
;
1327 ctiocb
->context3
= bmp
;
1329 ctiocb
->iocb_cmpl
= lpfc_issue_ct_rsp_cmp
;
1330 ctiocb
->context1
= dd_data
;
1331 ctiocb
->context2
= NULL
;
1332 dd_data
->type
= TYPE_IOCB
;
1333 dd_data
->context_un
.iocb
.cmdiocbq
= ctiocb
;
1334 dd_data
->context_un
.iocb
.rspiocbq
= NULL
;
1335 dd_data
->context_un
.iocb
.set_job
= job
;
1336 dd_data
->context_un
.iocb
.bmp
= bmp
;
1337 dd_data
->context_un
.iocb
.ndlp
= ndlp
;
1339 if (phba
->cfg_poll
& DISABLE_FCP_RING_INT
) {
1340 creg_val
= readl(phba
->HCregaddr
);
1341 creg_val
|= (HC_R0INT_ENA
<< LPFC_FCP_RING
);
1342 writel(creg_val
, phba
->HCregaddr
);
1343 readl(phba
->HCregaddr
); /* flush */
1346 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, ctiocb
, 0);
1348 if (rc
== IOCB_SUCCESS
)
1349 return 0; /* done for now */
1352 lpfc_sli_release_iocbq(phba
, ctiocb
);
1360 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1361 * @job: SEND_MGMT_RESP fc_bsg_job
1364 lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job
*job
)
1366 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
1367 struct lpfc_hba
*phba
= vport
->phba
;
1368 struct send_mgmt_resp
*mgmt_resp
= (struct send_mgmt_resp
*)
1369 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
1370 struct ulp_bde64
*bpl
;
1371 struct lpfc_dmabuf
*bmp
= NULL
;
1372 struct scatterlist
*sgel
= NULL
;
1376 uint32_t tag
= mgmt_resp
->tag
;
1377 unsigned long reqbfrcnt
=
1378 (unsigned long)job
->request_payload
.payload_len
;
1381 /* in case no data is transferred */
1382 job
->reply
->reply_payload_rcv_len
= 0;
1384 if (!reqbfrcnt
|| (reqbfrcnt
> (80 * BUF_SZ_4K
))) {
1386 goto send_mgmt_rsp_exit
;
1389 bmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1392 goto send_mgmt_rsp_exit
;
1395 bmp
->virt
= lpfc_mbuf_alloc(phba
, 0, &bmp
->phys
);
1398 goto send_mgmt_rsp_free_bmp
;
1401 INIT_LIST_HEAD(&bmp
->list
);
1402 bpl
= (struct ulp_bde64
*) bmp
->virt
;
1403 request_nseg
= pci_map_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
1404 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1405 for_each_sg(job
->request_payload
.sg_list
, sgel
, request_nseg
, numbde
) {
1406 busaddr
= sg_dma_address(sgel
);
1407 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64
;
1408 bpl
->tus
.f
.bdeSize
= sg_dma_len(sgel
);
1409 bpl
->tus
.w
= cpu_to_le32(bpl
->tus
.w
);
1410 bpl
->addrLow
= cpu_to_le32(putPaddrLow(busaddr
));
1411 bpl
->addrHigh
= cpu_to_le32(putPaddrHigh(busaddr
));
1415 rc
= lpfc_issue_ct_rsp(phba
, job
, tag
, bmp
, request_nseg
);
1417 if (rc
== IOCB_SUCCESS
)
1418 return 0; /* done for now */
1420 /* TBD need to handle a timeout */
1421 pci_unmap_sg(phba
->pcidev
, job
->request_payload
.sg_list
,
1422 job
->request_payload
.sg_cnt
, DMA_TO_DEVICE
);
1424 lpfc_mbuf_free(phba
, bmp
->virt
, bmp
->phys
);
1426 send_mgmt_rsp_free_bmp
:
1429 /* make error code available to userspace */
1430 job
->reply
->result
= rc
;
1431 job
->dd_data
= NULL
;
1436 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
1437 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1439 * This function is responsible for placing a port into diagnostic loopback
1440 * mode in order to perform a diagnostic loopback test.
1441 * All new scsi requests are blocked, a small delay is used to allow the
1442 * scsi requests to complete then the link is brought down. If the link is
1443 * is placed in loopback mode then scsi requests are again allowed
1444 * so the scsi mid-layer doesn't give up on the port.
1445 * All of this is done in-line.
1448 lpfc_bsg_diag_mode(struct fc_bsg_job
*job
)
1450 struct Scsi_Host
*shost
= job
->shost
;
1451 struct lpfc_vport
*vport
= (struct lpfc_vport
*)job
->shost
->hostdata
;
1452 struct lpfc_hba
*phba
= vport
->phba
;
1453 struct diag_mode_set
*loopback_mode
;
1454 struct lpfc_sli
*psli
= &phba
->sli
;
1455 struct lpfc_sli_ring
*pring
= &psli
->ring
[LPFC_FCP_RING
];
1456 uint32_t link_flags
;
1458 struct lpfc_vport
**vports
;
1459 LPFC_MBOXQ_t
*pmboxq
;
1464 /* no data to return just the return code */
1465 job
->reply
->reply_payload_rcv_len
= 0;
1467 if (job
->request_len
<
1468 sizeof(struct fc_bsg_request
) + sizeof(struct diag_mode_set
)) {
1469 lpfc_printf_log(phba
, KERN_WARNING
, LOG_LIBDFC
,
1470 "2738 Received DIAG MODE request below minimum "
1476 loopback_mode
= (struct diag_mode_set
*)
1477 job
->request
->rqst_data
.h_vendor
.vendor_cmd
;
1478 link_flags
= loopback_mode
->type
;
1479 timeout
= loopback_mode
->timeout
;
1481 if ((phba
->link_state
== LPFC_HBA_ERROR
) ||
1482 (psli
->sli_flag
& LPFC_BLOCK_MGMT_IO
) ||
1483 (!(psli
->sli_flag
& LPFC_SLI_ACTIVE
))) {
1488 pmboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1494 vports
= lpfc_create_vport_work_array(phba
);
1496 for (i
= 0; i
<= phba
->max_vpi
&& vports
[i
] != NULL
; i
++) {
1497 shost
= lpfc_shost_from_vport(vports
[i
]);
1498 scsi_block_requests(shost
);
1501 lpfc_destroy_vport_work_array(phba
, vports
);
1503 shost
= lpfc_shost_from_vport(phba
->pport
);
1504 scsi_block_requests(shost
);
1507 while (pring
->txcmplq_cnt
) {
1508 if (i
++ > 500) /* wait up to 5 seconds */
1514 memset((void *)pmboxq
, 0, sizeof(LPFC_MBOXQ_t
));
1515 pmboxq
->u
.mb
.mbxCommand
= MBX_DOWN_LINK
;
1516 pmboxq
->u
.mb
.mbxOwner
= OWN_HOST
;
1518 mbxstatus
= lpfc_sli_issue_mbox_wait(phba
, pmboxq
, LPFC_MBOX_TMO
);
1520 if ((mbxstatus
== MBX_SUCCESS
) && (pmboxq
->u
.mb
.mbxStatus
== 0)) {
1521 /* wait for link down before proceeding */
1523 while (phba
->link_state
!= LPFC_LINK_DOWN
) {
1524 if (i
++ > timeout
) {
1526 goto loopback_mode_exit
;
1532 memset((void *)pmboxq
, 0, sizeof(LPFC_MBOXQ_t
));
1533 if (link_flags
== INTERNAL_LOOP_BACK
)
1534 pmboxq
->u
.mb
.un
.varInitLnk
.link_flags
= FLAGS_LOCAL_LB
;
1536 pmboxq
->u
.mb
.un
.varInitLnk
.link_flags
=
1537 FLAGS_TOPOLOGY_MODE_LOOP
;
1539 pmboxq
->u
.mb
.mbxCommand
= MBX_INIT_LINK
;
1540 pmboxq
->u
.mb
.mbxOwner
= OWN_HOST
;
1542 mbxstatus
= lpfc_sli_issue_mbox_wait(phba
, pmboxq
,
1545 if ((mbxstatus
!= MBX_SUCCESS
) || (pmboxq
->u
.mb
.mbxStatus
))
1548 phba
->link_flag
|= LS_LOOPBACK_MODE
;
1549 /* wait for the link attention interrupt */
1553 while (phba
->link_state
!= LPFC_HBA_READY
) {
1554 if (i
++ > timeout
) {
1567 vports
= lpfc_create_vport_work_array(phba
);
1569 for (i
= 0; i
<= phba
->max_vpi
&& vports
[i
] != NULL
; i
++) {
1570 shost
= lpfc_shost_from_vport(vports
[i
]);
1571 scsi_unblock_requests(shost
);
1573 lpfc_destroy_vport_work_array(phba
, vports
);
1575 shost
= lpfc_shost_from_vport(phba
->pport
);
1576 scsi_unblock_requests(shost
);
1580 * Let SLI layer release mboxq if mbox command completed after timeout.
1582 if (mbxstatus
!= MBX_TIMEOUT
)
1583 mempool_free(pmboxq
, phba
->mbox_mem_pool
);
1586 /* make error code available to userspace */
1587 job
->reply
->result
= rc
;
1588 /* complete the job back to userspace if no error */
1595 * lpfcdiag_loop_self_reg - obtains a remote port login id
1596 * @phba: Pointer to HBA context object
1597 * @rpi: Pointer to a remote port login id
1599 * This function obtains a remote port login id so the diag loopback test
1600 * can send and receive its own unsolicited CT command.
1602 static int lpfcdiag_loop_self_reg(struct lpfc_hba
*phba
, uint16_t * rpi
)
1605 struct lpfc_dmabuf
*dmabuff
;
1608 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1612 status
= lpfc_reg_rpi(phba
, 0, phba
->pport
->fc_myDID
,
1613 (uint8_t *)&phba
->pport
->fc_sparam
, mbox
, 0);
1615 mempool_free(mbox
, phba
->mbox_mem_pool
);
1619 dmabuff
= (struct lpfc_dmabuf
*) mbox
->context1
;
1620 mbox
->context1
= NULL
;
1621 status
= lpfc_sli_issue_mbox_wait(phba
, mbox
, LPFC_MBOX_TMO
);
1623 if ((status
!= MBX_SUCCESS
) || (mbox
->u
.mb
.mbxStatus
)) {
1624 lpfc_mbuf_free(phba
, dmabuff
->virt
, dmabuff
->phys
);
1626 if (status
!= MBX_TIMEOUT
)
1627 mempool_free(mbox
, phba
->mbox_mem_pool
);
1631 *rpi
= mbox
->u
.mb
.un
.varWords
[0];
1633 lpfc_mbuf_free(phba
, dmabuff
->virt
, dmabuff
->phys
);
1635 mempool_free(mbox
, phba
->mbox_mem_pool
);
1640 * lpfcdiag_loop_self_unreg - unregs from the rpi
1641 * @phba: Pointer to HBA context object
1642 * @rpi: Remote port login id
1644 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
1646 static int lpfcdiag_loop_self_unreg(struct lpfc_hba
*phba
, uint16_t rpi
)
1651 /* Allocate mboxq structure */
1652 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
1656 lpfc_unreg_login(phba
, 0, rpi
, mbox
);
1657 status
= lpfc_sli_issue_mbox_wait(phba
, mbox
, LPFC_MBOX_TMO
);
1659 if ((status
!= MBX_SUCCESS
) || (mbox
->u
.mb
.mbxStatus
)) {
1660 if (status
!= MBX_TIMEOUT
)
1661 mempool_free(mbox
, phba
->mbox_mem_pool
);
1665 mempool_free(mbox
, phba
->mbox_mem_pool
);
1670 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
1671 * @phba: Pointer to HBA context object
1672 * @rpi: Remote port login id
1673 * @txxri: Pointer to transmit exchange id
1674 * @rxxri: Pointer to response exchabge id
1676 * This function obtains the transmit and receive ids required to send
1677 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
1678 * flags are used to the unsolicted response handler is able to process
1679 * the ct command sent on the same port.
1681 static int lpfcdiag_loop_get_xri(struct lpfc_hba
*phba
, uint16_t rpi
,
1682 uint16_t *txxri
, uint16_t * rxxri
)
1684 struct lpfc_bsg_event
*evt
;
1685 struct lpfc_iocbq
*cmdiocbq
, *rspiocbq
;
1687 struct lpfc_dmabuf
*dmabuf
;
1688 struct ulp_bde64
*bpl
= NULL
;
1689 struct lpfc_sli_ct_request
*ctreq
= NULL
;
1691 unsigned long flags
;
1695 evt
= lpfc_bsg_event_new(FC_REG_CT_EVENT
, current
->pid
,
1696 SLI_CT_ELX_LOOPBACK
);
1700 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1701 list_add(&evt
->node
, &phba
->ct_ev_waiters
);
1702 lpfc_bsg_event_ref(evt
);
1703 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1705 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
1706 rspiocbq
= lpfc_sli_get_iocbq(phba
);
1708 dmabuf
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1710 dmabuf
->virt
= lpfc_mbuf_alloc(phba
, 0, &dmabuf
->phys
);
1711 INIT_LIST_HEAD(&dmabuf
->list
);
1712 bpl
= (struct ulp_bde64
*) dmabuf
->virt
;
1713 memset(bpl
, 0, sizeof(*bpl
));
1714 ctreq
= (struct lpfc_sli_ct_request
*)(bpl
+ 1);
1716 le32_to_cpu(putPaddrHigh(dmabuf
->phys
+ sizeof(*bpl
)));
1718 le32_to_cpu(putPaddrLow(dmabuf
->phys
+ sizeof(*bpl
)));
1719 bpl
->tus
.f
.bdeFlags
= 0;
1720 bpl
->tus
.f
.bdeSize
= ELX_LOOPBACK_HEADER_SZ
;
1721 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1724 if (cmdiocbq
== NULL
|| rspiocbq
== NULL
||
1725 dmabuf
== NULL
|| bpl
== NULL
|| ctreq
== NULL
) {
1727 goto err_get_xri_exit
;
1730 cmd
= &cmdiocbq
->iocb
;
1731 rsp
= &rspiocbq
->iocb
;
1733 memset(ctreq
, 0, ELX_LOOPBACK_HEADER_SZ
);
1735 ctreq
->RevisionId
.bits
.Revision
= SLI_CT_REVISION
;
1736 ctreq
->RevisionId
.bits
.InId
= 0;
1737 ctreq
->FsType
= SLI_CT_ELX_LOOPBACK
;
1738 ctreq
->FsSubType
= 0;
1739 ctreq
->CommandResponse
.bits
.CmdRsp
= ELX_LOOPBACK_XRI_SETUP
;
1740 ctreq
->CommandResponse
.bits
.Size
= 0;
1743 cmd
->un
.xseq64
.bdl
.addrHigh
= putPaddrHigh(dmabuf
->phys
);
1744 cmd
->un
.xseq64
.bdl
.addrLow
= putPaddrLow(dmabuf
->phys
);
1745 cmd
->un
.xseq64
.bdl
.bdeFlags
= BUFF_TYPE_BLP_64
;
1746 cmd
->un
.xseq64
.bdl
.bdeSize
= sizeof(*bpl
);
1748 cmd
->un
.xseq64
.w5
.hcsw
.Fctl
= LA
;
1749 cmd
->un
.xseq64
.w5
.hcsw
.Dfctl
= 0;
1750 cmd
->un
.xseq64
.w5
.hcsw
.Rctl
= FC_RCTL_DD_UNSOL_CTL
;
1751 cmd
->un
.xseq64
.w5
.hcsw
.Type
= FC_TYPE_CT
;
1753 cmd
->ulpCommand
= CMD_XMIT_SEQUENCE64_CR
;
1754 cmd
->ulpBdeCount
= 1;
1756 cmd
->ulpClass
= CLASS3
;
1757 cmd
->ulpContext
= rpi
;
1759 cmdiocbq
->iocb_flag
|= LPFC_IO_LIBDFC
;
1760 cmdiocbq
->vport
= phba
->pport
;
1762 ret_val
= lpfc_sli_issue_iocb_wait(phba
, LPFC_ELS_RING
, cmdiocbq
,
1764 (phba
->fc_ratov
* 2)
1765 + LPFC_DRVR_TIMEOUT
);
1767 goto err_get_xri_exit
;
1769 *txxri
= rsp
->ulpContext
;
1772 evt
->wait_time_stamp
= jiffies
;
1773 ret_val
= wait_event_interruptible_timeout(
1774 evt
->wq
, !list_empty(&evt
->events_to_see
),
1775 ((phba
->fc_ratov
* 2) + LPFC_DRVR_TIMEOUT
) * HZ
);
1776 if (list_empty(&evt
->events_to_see
))
1777 ret_val
= (ret_val
) ? EINTR
: ETIMEDOUT
;
1779 ret_val
= IOCB_SUCCESS
;
1780 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1781 list_move(evt
->events_to_see
.prev
, &evt
->events_to_get
);
1782 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1783 *rxxri
= (list_entry(evt
->events_to_get
.prev
,
1784 typeof(struct event_data
),
1790 spin_lock_irqsave(&phba
->ct_ev_lock
, flags
);
1791 lpfc_bsg_event_unref(evt
); /* release ref */
1792 lpfc_bsg_event_unref(evt
); /* delete */
1793 spin_unlock_irqrestore(&phba
->ct_ev_lock
, flags
);
1797 lpfc_mbuf_free(phba
, dmabuf
->virt
, dmabuf
->phys
);
1801 if (cmdiocbq
&& (ret_val
!= IOCB_TIMEDOUT
))
1802 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
1804 lpfc_sli_release_iocbq(phba
, rspiocbq
);
1809 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
1810 * @phba: Pointer to HBA context object
1811 * @bpl: Pointer to 64 bit bde structure
1812 * @size: Number of bytes to process
1813 * @nocopydata: Flag to copy user data into the allocated buffer
1815 * This function allocates page size buffers and populates an lpfc_dmabufext.
1816 * If allowed the user data pointed to with indataptr is copied into the kernel
1817 * memory. The chained list of page size buffers is returned.
1819 static struct lpfc_dmabufext
*
1820 diag_cmd_data_alloc(struct lpfc_hba
*phba
,
1821 struct ulp_bde64
*bpl
, uint32_t size
,
1824 struct lpfc_dmabufext
*mlist
= NULL
;
1825 struct lpfc_dmabufext
*dmp
;
1826 int cnt
, offset
= 0, i
= 0;
1827 struct pci_dev
*pcidev
;
1829 pcidev
= phba
->pcidev
;
1832 /* We get chunks of 4K */
1833 if (size
> BUF_SZ_4K
)
1838 /* allocate struct lpfc_dmabufext buffer header */
1839 dmp
= kmalloc(sizeof(struct lpfc_dmabufext
), GFP_KERNEL
);
1843 INIT_LIST_HEAD(&dmp
->dma
.list
);
1845 /* Queue it to a linked list */
1847 list_add_tail(&dmp
->dma
.list
, &mlist
->dma
.list
);
1851 /* allocate buffer */
1852 dmp
->dma
.virt
= dma_alloc_coherent(&pcidev
->dev
,
1863 bpl
->tus
.f
.bdeFlags
= 0;
1864 pci_dma_sync_single_for_device(phba
->pcidev
,
1865 dmp
->dma
.phys
, LPFC_BPL_SIZE
, PCI_DMA_TODEVICE
);
1868 memset((uint8_t *)dmp
->dma
.virt
, 0, cnt
);
1869 bpl
->tus
.f
.bdeFlags
= BUFF_TYPE_BDE_64I
;
1872 /* build buffer ptr list for IOCB */
1873 bpl
->addrLow
= le32_to_cpu(putPaddrLow(dmp
->dma
.phys
));
1874 bpl
->addrHigh
= le32_to_cpu(putPaddrHigh(dmp
->dma
.phys
));
1875 bpl
->tus
.f
.bdeSize
= (ushort
) cnt
;
1876 bpl
->tus
.w
= le32_to_cpu(bpl
->tus
.w
);
1887 diag_cmd_data_free(phba
, mlist
);
1892 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
1893 * @phba: Pointer to HBA context object
1894 * @rxxri: Receive exchange id
1895 * @len: Number of data bytes
1897 * This function allocates and posts a data buffer of sufficient size to recieve
1898 * an unsolicted CT command.
1900 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba
*phba
, uint16_t rxxri
,
1903 struct lpfc_sli
*psli
= &phba
->sli
;
1904 struct lpfc_sli_ring
*pring
= &psli
->ring
[LPFC_ELS_RING
];
1905 struct lpfc_iocbq
*cmdiocbq
;
1907 struct list_head head
, *curr
, *next
;
1908 struct lpfc_dmabuf
*rxbmp
;
1909 struct lpfc_dmabuf
*dmp
;
1910 struct lpfc_dmabuf
*mp
[2] = {NULL
, NULL
};
1911 struct ulp_bde64
*rxbpl
= NULL
;
1913 struct lpfc_dmabufext
*rxbuffer
= NULL
;
1917 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
1918 rxbmp
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
1919 if (rxbmp
!= NULL
) {
1920 rxbmp
->virt
= lpfc_mbuf_alloc(phba
, 0, &rxbmp
->phys
);
1921 INIT_LIST_HEAD(&rxbmp
->list
);
1922 rxbpl
= (struct ulp_bde64
*) rxbmp
->virt
;
1923 rxbuffer
= diag_cmd_data_alloc(phba
, rxbpl
, len
, 0);
1926 if (!cmdiocbq
|| !rxbmp
|| !rxbpl
|| !rxbuffer
) {
1928 goto err_post_rxbufs_exit
;
1931 /* Queue buffers for the receive exchange */
1932 num_bde
= (uint32_t)rxbuffer
->flag
;
1933 dmp
= &rxbuffer
->dma
;
1935 cmd
= &cmdiocbq
->iocb
;
1938 INIT_LIST_HEAD(&head
);
1939 list_add_tail(&head
, &dmp
->list
);
1940 list_for_each_safe(curr
, next
, &head
) {
1941 mp
[i
] = list_entry(curr
, struct lpfc_dmabuf
, list
);
1944 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
1945 mp
[i
]->buffer_tag
= lpfc_sli_get_buffer_tag(phba
);
1946 cmd
->un
.quexri64cx
.buff
.bde
.addrHigh
=
1947 putPaddrHigh(mp
[i
]->phys
);
1948 cmd
->un
.quexri64cx
.buff
.bde
.addrLow
=
1949 putPaddrLow(mp
[i
]->phys
);
1950 cmd
->un
.quexri64cx
.buff
.bde
.tus
.f
.bdeSize
=
1951 ((struct lpfc_dmabufext
*)mp
[i
])->size
;
1952 cmd
->un
.quexri64cx
.buff
.buffer_tag
= mp
[i
]->buffer_tag
;
1953 cmd
->ulpCommand
= CMD_QUE_XRI64_CX
;
1956 cmd
->ulpBdeCount
= 1;
1957 cmd
->unsli3
.que_xri64cx_ext_words
.ebde_count
= 0;
1960 cmd
->un
.cont64
[i
].addrHigh
= putPaddrHigh(mp
[i
]->phys
);
1961 cmd
->un
.cont64
[i
].addrLow
= putPaddrLow(mp
[i
]->phys
);
1962 cmd
->un
.cont64
[i
].tus
.f
.bdeSize
=
1963 ((struct lpfc_dmabufext
*)mp
[i
])->size
;
1964 cmd
->ulpBdeCount
= ++i
;
1966 if ((--num_bde
> 0) && (i
< 2))
1969 cmd
->ulpCommand
= CMD_QUE_XRI_BUF64_CX
;
1973 cmd
->ulpClass
= CLASS3
;
1974 cmd
->ulpContext
= rxxri
;
1976 ret_val
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, cmdiocbq
, 0);
1978 if (ret_val
== IOCB_ERROR
) {
1979 diag_cmd_data_free(phba
,
1980 (struct lpfc_dmabufext
*)mp
[0]);
1982 diag_cmd_data_free(phba
,
1983 (struct lpfc_dmabufext
*)mp
[1]);
1984 dmp
= list_entry(next
, struct lpfc_dmabuf
, list
);
1986 goto err_post_rxbufs_exit
;
1989 lpfc_sli_ringpostbuf_put(phba
, pring
, mp
[0]);
1991 lpfc_sli_ringpostbuf_put(phba
, pring
, mp
[1]);
1995 /* The iocb was freed by lpfc_sli_issue_iocb */
1996 cmdiocbq
= lpfc_sli_get_iocbq(phba
);
1998 dmp
= list_entry(next
, struct lpfc_dmabuf
, list
);
2000 goto err_post_rxbufs_exit
;
2003 cmd
= &cmdiocbq
->iocb
;
2008 err_post_rxbufs_exit
:
2012 lpfc_mbuf_free(phba
, rxbmp
->virt
, rxbmp
->phys
);
2017 lpfc_sli_release_iocbq(phba
, cmdiocbq
);
2022 * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
2023 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2025 * This function receives a user data buffer to be transmitted and received on
2026 * the same port, the link must be up and in loopback mode prior
2028 * 1. A kernel buffer is allocated to copy the user data into.
2029 * 2. The port registers with "itself".
2030 * 3. The transmit and receive exchange ids are obtained.
2031 * 4. The receive exchange id is posted.
2032 * 5. A new els loopback event is created.
2033 * 6. The command and response iocbs are allocated.
2034 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback.
2036 * This function is meant to be called n times while the port is in loopback
2037 * so it is the apps responsibility to issue a reset to take the port out
static int
lpfc_bsg_diag_test(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_test *diag_mode;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;
	uint32_t full_size;
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf *curr;
	uint16_t txxri, rxxri;
	uint32_t num_bde;
	uint32_t total_mem;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	unsigned long flags;
	void *dataout = NULL;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		goto loopback_test_exit;
	}

	if (job->request_payload.payload_len !=
	    job->reply_payload.payload_len) {
		goto loopback_test_exit;
	}

	diag_mode = (struct diag_mode_test *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		goto loopback_test_exit;
	}

	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ;	/* plus the header */

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		goto loopback_test_exit;
	}

	if (size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
		 * then we allocate 64k and re-use that buffer over and over to
		 * xfer the whole block. This is because Linux kernel has a
		 * problem allocating more than 120k of kernel space memory. Saw
		 * problem with GET_FCPTARGETMAPPING...
		 */
		if (size <= (64 * 1024))
			total_mem = size;
		else
			total_mem = 64 * 1024;
	} else
		/* Allocate memory for ioctl data */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		goto loopback_test_exit;
	}

	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ptr, size);

	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc)
		goto loopback_test_exit;

	rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
	if (rc) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		goto loopback_test_exit;
	}

	rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
	if (rc) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		goto loopback_test_exit;
	}

	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				 SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		INIT_LIST_HEAD(&txbmp->list);
		txbpl = (struct ulp_bde64 *) txbmp->virt;
		if (txbpl)
			txbuffer = diag_cmd_data_alloc(phba,
						       txbpl, full_size, 0);
	}

	if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
		goto err_loopback_test_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
			ptr + current_offset,
			segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);

	/* Build the XMIT_SEQUENCE iocb */

	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = txxri;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
				      (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);

	if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
		goto err_loopback_test_exit;
	}

	rc = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);

	if (list_empty(&evt->events_to_see))
		rc = (rc) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			rc = IOCB_SUCCESS;
			/* skip over elx loopback header */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			job->reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			job->reply->reply_payload_rcv_len = size;
		}
	}

err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (cmdiocbq != NULL)
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}
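/*
 * Usage sketch (illustrative only, not driver code): the sequence an
 * application is expected to follow with the vendor commands handled in
 * this file is roughly
 *
 *	vendor_cmd[0] = LPFC_BSG_VENDOR_DIAG_MODE;   put the link in loopback
 *	vendor_cmd[0] = LPFC_BSG_VENDOR_DIAG_TEST;   run the test, n times
 *	...                                          reset the port to leave
 *	                                             loopback mode
 *
 * lpfc_bsg_diag_test() only checks LS_LOOPBACK_MODE, it never clears it,
 * which is why the final reset is the application's job.
 */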
/**
 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
 * @job: GET_DFC_REV fc_bsg_job
 **/
static int
lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_mgmt_rev *event_req;
	struct get_mgmt_rev_reply *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2740 Received GET_DFC_REV request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_mgmt_rev *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_mgmt_rev_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2741 Received GET_DFC_REV reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;

job_error:
	job->reply->result = rc;
	if (rc == 0)
		job->job_done(job);
	return rc;
}
/**
 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_bsg_issue_mbox function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed by context1
 * of the mailbox.
 **/
static void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	MAILBOX_t *pmb;
	MAILBOX_t *mb;
	struct fc_bsg_job *job;
	uint32_t size;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = pmboxq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	pmb = &dd_data->context_un.mbox.pmboxq->u.mb;
	mb = dd_data->context_un.mbox.mb;
	job = dd_data->context_un.mbox.set_job;
	memcpy(mb, pmb, sizeof(*pmb));
	size = job->request_payload.payload_len;
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    mb, size);
	job->reply->result = 0;
	dd_data->context_un.mbox.set_job = NULL;
	job->dd_data = NULL;
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	kfree(mb);
	kfree(dd_data);
	return;
}
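/*
 * Note (sketch of the handoff, using only assignments that appear later in
 * this file): lpfc_bsg_issue_mbox() wires the asynchronous path up roughly as
 *
 *	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
 *	pmboxq->context1 = dd_data;
 *	dd_data->context_un.mbox.set_job = job;
 *	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *
 * so when the mailbox completes, the handler above recovers the bsg job from
 * context1 and finishes it without any additional lookup.
 */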
/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
	MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_DUMP_CONTEXT:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2743 Command 0x%x is illegal in on-line "
				"state\n",
				mb->mbxCommand);
			return -EPERM;
		}
	case MBX_WRITE_VPARMS:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_LOAD_EXP_ROM:
	case MBX_DEL_LD_ENTRY:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1226 mbox: set_variable 0x%x, 0x%x\n",
			mb->un.varWords[0],
			mb->un.varWords[1]);
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
			&& (mb->un.varWords[1] == 1)) {
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			phba->fc_topology = TOPOLOGY_PT_PT;
		}
		break;
	case MBX_RUN_BIU_DIAG64:
	case MBX_READ_EVENT_LOG:
	case MBX_READ_SPARM64:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
			"2742 Unknown Command 0x%x\n",
			mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}
/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job carrying the mailbox command.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object, mailbox command memory, get a mailbox
 * from the mailbox pool, copy the caller mailbox command.
 *
 * If offline and the sli is active we need to poll for the command (port is
 * being reset) and complete the job, otherwise issue the mailbox command and
 * let our completion handler finish the command.
 **/
static int
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
	struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *pmb;
	MAILBOX_t *mb;
	struct bsg_job_data *dd_data;
	uint32_t size;
	int rc = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		return -ENOMEM;
	}

	mb = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!mb) {
		kfree(dd_data);
		return -ENOMEM;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		kfree(dd_data);
		kfree(mb);
		return -ENOMEM;
	}

	size = job->request_payload.payload_len;
	job->reply->reply_payload_rcv_len =
		sg_copy_to_buffer(job->request_payload.sg_list,
				  job->request_payload.sg_cnt,
				  mb, size);

	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
	if (rc != 0) {
		kfree(dd_data);
		kfree(mb);
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return rc; /* must be negative */
	}

	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmb = &pmboxq->u.mb;
	memcpy(pmb, mb, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->context1 = NULL;
	pmboxq->vport = vport;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			if (rc != MBX_TIMEOUT) {
				kfree(dd_data);
				kfree(mb);
				mempool_free(pmboxq, phba->mbox_mem_pool);
			}
			return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
		}

		memcpy(mb, pmb, sizeof(*pmb));
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    mb, size);
		kfree(dd_data);
		kfree(mb);
		mempool_free(pmboxq, phba->mbox_mem_pool);
		/* not waiting mbox already done */
		return 0;
	}

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = mb;
	dd_data->context_un.mbox.set_job = job;
	job->dd_data = dd_data;
	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		kfree(dd_data);
		kfree(mb);
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -EIO;
	}

	return 1;
}
/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
 **/
static int
lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2737 Received MBOX_REQ request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	if (job->request_payload.payload_len != PAGE_SIZE) {
		rc = -EINVAL;
		goto job_error;
	}

	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_error;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);

job_error:
	if (rc == 0) {
		/* job done */
		job->reply->result = 0;
		job->dd_data = NULL;
		job->job_done(job);
	} else if (rc == 1)
		/* job submitted, will complete later*/
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		job->reply->result = rc;
		job->dd_data = NULL;
	}

	return rc;
}
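/*
 * Return-value convention for the two functions above (summary; the rc == 1
 * "submitted" value is an assumption based on the branch comments, it is not
 * spelled out elsewhere in this file):
 *
 *	rc = lpfc_bsg_issue_mbox(phba, job, vport);
 *	rc == 0  -> mailbox was polled, the job is already complete
 *	rc == 1  -> mailbox issued MBX_NOWAIT; lpfc_bsg_wake_mbox_wait()
 *	            completes the job later
 *	rc <  0  -> error, reported to the application in job->reply->result
 */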
/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_menlo_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	menlo = &dd_data->context_un.menlo;
	job = menlo->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock_irqrestore(&phba->hbalock, flags);

	bmp = menlo->bmp;
	rspiocbq = menlo->rspiocbq;
	rsp = &rspiocbq->iocb;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	/* always return the xri, this would be used in the case
	 * of a menlo download to allow the data to be sent as a continuation
	 * of the exchange.
	 */
	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	menlo_resp->xri = rsp->ulpContext;
	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_menlo_cmd - send an ioctl for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the exchange
 * supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct menlo_response *menlo_resp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* Limited Edition */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context2 = rspiocbq;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rspiocbq = rspiocbq;
	dd_data->context_un.menlo.set_job = job;
	dd_data->context_un.menlo.bmp = bmp;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
				 MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
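/*
 * Quick reference for the two menlo flavours handled above (derived from the
 * code in this function, not from an external spec):
 *
 *	LPFC_BSG_VENDOR_MENLO_CMD  -> CMD_GEN_REQUEST64_CR, ulpContext = 0,
 *	                              un.ulpWord[4] = MENLO_DID
 *	LPFC_BSG_VENDOR_MENLO_DATA -> CMD_GEN_REQUEST64_CX, ulpContext = xri
 *	                              returned by the earlier MENLO_CMD
 *
 * Either way the completion handler returns the xri in the menlo_response so
 * a download can be continued on the same exchange.
 */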
/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_TEST:
		rc = lpfc_bsg_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
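/*
 * Caller-side sketch (illustrative only, and it makes assumptions about the
 * userspace plumbing): a management application reaches this dispatcher
 * through the FC transport bsg node, typically /dev/bsg/fc_host<N>, by
 * issuing an SG_IO ioctl with an sg_io_v4 whose request buffer starts with a
 * struct fc_bsg_request:
 *
 *	bsg_req->msgcode = FC_BSG_HST_VENDOR;
 *	bsg_req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;
 *	sgio.guard = 'Q';
 *	sgio.protocol = BSG_PROTOCOL_SCSI;
 *	sgio.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	sgio.request = (uintptr_t)bsg_req;
 *	sgio.request_len = sizeof(*bsg_req) + sizeof(struct get_mgmt_rev);
 *	ioctl(bsg_fd, SG_IO, &sgio);
 *
 * The reply (struct fc_bsg_reply plus the vendor response) comes back in the
 * sg_io_v4 response buffer; exact node naming depends on the transport setup.
 */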
/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB. The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace.
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_bsg_menlo *menlo;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signalling the
		 * driver stopped the job
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MENLO:
		menlo = &dd_data->context_un.menlo;
		cmdiocb = menlo->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return 0;
}
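/*
 * Wiring note (a sketch; the template itself lives in lpfc_attr.c, not in
 * this file): lpfc_bsg_request() and lpfc_bsg_timeout() are the entry points
 * the FC transport calls, hooked up roughly as
 *
 *	.bsg_request = lpfc_bsg_request,
 *	.bsg_timeout = lpfc_bsg_timeout,
 *
 * in the driver's fc_function_template, so the transport, not this file,
 * decides when a job has timed out.
 */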