/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009 Emulex.  All rights reserved.                *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/**
 * lpfc_bsg_rport_ct - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 */
static int
lpfc_bsg_rport_ct(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	if (!lpfc_nlp_get(ndlp)) {
		job->reply->result = -ENODEV;
		return 0;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp_exit;
	}

	spin_lock_irq(shost->host_lock);
	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		spin_unlock_irq(shost->host_lock);
		goto free_ndlp_exit;
	}
	cmd = &cmdiocbq->iocb;

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	spin_unlock_irq(shost->host_lock);

	rsp = &rspiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		spin_lock_irq(shost->host_lock);
		goto free_rspiocbq;
	}

	spin_lock_irq(shost->host_lock);
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	spin_unlock_irq(shost->host_lock);

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
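
	/*
	 * Build the buffer pointer list (BPL) in the freshly allocated
	 * mbuf: one 64-bit BDE per mapped scatter/gather segment,
	 * request segments first, then reply segments.  The HBA walks
	 * this list to DMA the CT command out and the response back in.
	 */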
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
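
	/*
	 * Set up a GEN_REQUEST64 IOCB: point its BDL at the BPL built
	 * above and frame the payload as an FC Common Transport request
	 * (unsolicited control, class 3) addressed by the remote port's
	 * RPI.
	 */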
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;

	timeout = phba->fc_ratov * 2;
	job->dd_data = cmdiocbq;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
					timeout + LPFC_DRVR_TIMEOUT);
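
	/*
	 * lpfc_sli_issue_iocb_wait() sleeps until the IOCB completes or
	 * the driver-side timeout expires.  On IOCB_TIMEDOUT the IOCB may
	 * still complete later, so the command resources are left for the
	 * completion path rather than being unmapped and freed here.
	 */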
	if (rc != IOCB_TIMEDOUT) {
		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (rc == IOCB_TIMEDOUT) {
		lpfc_sli_release_iocbq(phba, rspiocbq);
		rc = -EACCES;
		goto free_ndlp_exit;
	}

	if (rc != IOCB_SUCCESS) {
		rc = -EACCES;
		goto free_outdmp;
	}

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
			goto free_outdmp;
		}
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

free_outdmp:
	spin_lock_irq(shost->host_lock);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
free_bmp:
	kfree(bmp);
free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	spin_unlock_irq(shost->host_lock);
free_ndlp_exit:
	lpfc_nlp_put(ndlp);

	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return 0;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 */
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *rsp;
	uint16_t rpi = 0;
	struct lpfc_dmabuf *pcmd;
	struct lpfc_dmabuf *prsp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct ulp_bde64 *bpl;
	int iocb_status;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto out;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		lpfc_nlp_put(ndlp);
		rc = -ENOMEM;
		goto out;
	}

	rsp = &rspiocbq->iocb;
	rpi = ndlp->nlp_rpi;

	cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		lpfc_sli_release_iocbq(phba, rspiocbq);
		return -EIO;
	}
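
	/*
	 * lpfc_prep_els_iocb() allocated its own command and response
	 * mbufs; the bsg job supplies the payload instead, so free the
	 * prepared buffers and point the IOCB's BPL at the job's
	 * scatter/gather lists below.
	 */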
	job->dd_data = cmdiocbq;
	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *) pcmd->list.next;

	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(prsp);
	cmdiocbq->context2 = NULL;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	bpl = (struct ulp_bde64 *) pbuflist->virt;

	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;

	iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					rspiocbq, (phba->fc_ratov * 2)
						       + LPFC_DRVR_TIMEOUT);

	/* release the new ndlp once the iocb completes */
	lpfc_nlp_put(ndlp);
	if (iocb_status != IOCB_TIMEDOUT) {
		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (iocb_status == IOCB_SUCCESS) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			job->reply->reply_payload_rcv_len =
				rsp->un.elsreq64.bdl.bdeSize;
			rc = 0;
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			struct fc_bsg_ctels_reply *els_reply;
			/* LS_RJT data returned in word 4 */
			uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];

			els_reply = &job->reply->reply_data.ctels_reply;
			job->reply->result = 0;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[0];
			els_reply->rjt_data.reason_code = rjt_data[1];
			els_reply->rjt_data.reason_explanation = rjt_data[2];
			els_reply->rjt_data.vendor_unique = rjt_data[3];
		} else
			rc = -EIO;
	} else
		rc = -EIO;
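
	/*
	 * Note that an LS_RJT is a successful transport round trip: the
	 * job result stays 0 and the rejection details are passed to
	 * userspace in the fc_bsg_ctels_reply rather than as an errno.
	 */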

	if (iocb_status != IOCB_TIMEDOUT)
		lpfc_els_free_iocb(phba, cmdiocbq);

	lpfc_sli_release_iocbq(phba, rspiocbq);

out:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return 0;
}

struct lpfc_ct_event {
	struct list_head node;
	int ref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};
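
/*
 * Event waiters are reference counted by hand.  lpfc_ct_event_ref() and
 * lpfc_ct_event_unref() are expected to be called under ct_event_mutex;
 * dropping the count below zero frees the waiter along with any event
 * data still queued on it.
 */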
static struct lpfc_ct_event *
lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	init_waitqueue_head(&evt->wq);

	return evt;
}

static void
lpfc_ct_event_free(struct lpfc_ct_event *evt)
{
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt);
}

static inline void
lpfc_ct_event_ref(struct lpfc_ct_event *evt)
{
	evt->ref++;
}

static inline void
lpfc_ct_event_unref(struct lpfc_ct_event *evt)
{
	if (--evt->ref < 0)
		lpfc_ct_event_free(evt);
}

#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to sli ring.
 * @piocbq: Pointer to iocb.
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registered to receive CT events.
 */
void
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_ct_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;
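
	/*
	 * With host buffer queues (HBQs) enabled the receive buffer
	 * arrives attached to the IOCB (context2/context3); otherwise it
	 * must be looked up by DMA address among the ring's posted
	 * buffers and handed back to the ring once consumed.
	 */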
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}

	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	mutex_lock(&phba->ct_event_mutex);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->req_id != evt_req_id)
			continue;

		lpfc_ct_event_ref(evt);

		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (!evt_dat) {
			lpfc_ct_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		mutex_unlock(&phba->ct_event_mutex);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (!evt_dat->data) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			mutex_lock(&phba->ct_event_mutex);
			lpfc_ct_event_unref(evt);
			mutex_unlock(&phba->ct_event_mutex);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				int size = 0;
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					mutex_lock(&phba->ct_event_mutex);
					lpfc_ct_event_unref(evt);
					mutex_unlock(&phba->ct_event_mutex);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_XRI_SETUP:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						else
							lpfc_in_buf_free(phba,
									dmabuf);
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}
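
		/*
		 * On SLI-4 the exchange identifiers of the unsolicited
		 * frame are cached in the small ct_ctx table so that a
		 * later response can address the originating exchange;
		 * immed_dat carries the table index up to userspace.
		 */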
		mutex_lock(&phba->ct_event_mutex);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		wake_up_interruptible(&evt->wq);
		lpfc_ct_event_unref(evt);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK)
			break;
	}
	mutex_unlock(&phba->ct_event_mutex);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);

	return;
}
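
/*
 * SET_CT_EVENT and GET_CT_EVENT form a producer/consumer pair:
 * lpfc_bsg_ct_unsol_event() above queues incoming CT frames on a waiter's
 * events_to_see list and wakes it; SET_CT_EVENT sleeps for the next event
 * and moves it to events_to_get, from which GET_CT_EVENT copies the data
 * out to userspace.
 */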

/**
 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 */
static int
lpfc_bsg_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_ct_event *evt;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		return -EINVAL;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	mutex_lock(&phba->ct_event_mutex);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_ct_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	mutex_unlock(&phba->ct_event_mutex);
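
	/*
	 * If the search above fell off the end of the list,
	 * list_for_each_entry() leaves evt pointing at the list head
	 * itself, i.e. no waiter is registered for this reg_id yet.
	 */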
	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_ct_event_new(event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			return -ENOMEM;
		}

		mutex_lock(&phba->ct_event_mutex);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_ct_event_ref(evt);
		mutex_unlock(&phba->ct_event_mutex);
	}

	evt->waiting = 1;
	if (wait_event_interruptible(evt->wq,
				     !list_empty(&evt->events_to_see))) {
		mutex_lock(&phba->ct_event_mutex);
		lpfc_ct_event_unref(evt); /* release ref */
		lpfc_ct_event_unref(evt); /* delete */
		mutex_unlock(&phba->ct_event_mutex);
		rc = -EINTR;
		goto set_event_out;
	}

	evt->wait_time_stamp = jiffies;
	evt->waiting = 0;

	mutex_lock(&phba->ct_event_mutex);
	list_move(evt->events_to_see.prev, &evt->events_to_get);
	lpfc_ct_event_unref(evt); /* release ref */
	mutex_unlock(&phba->ct_event_mutex);

set_event_out:
	/* set_event carries no reply payload */
	job->reply->reply_payload_rcv_len = 0;
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return 0;
}

/**
 * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 */
static int
lpfc_bsg_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_ct_event *evt;
	struct event_data *evt_dat = NULL;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		return -EINVAL;
	}

	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	mutex_lock(&phba->ct_event_mutex);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_ct_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	mutex_unlock(&phba->ct_event_mutex);

	if (!evt_dat) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto error_get_event_exit;
	}

	if (evt_dat->len > job->reply_payload.payload_len) {
		evt_dat->len = job->reply_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->reply_payload.payload_len);
	}

	event_reply->immed_data = evt_dat->immed_dat;

	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;

	if (evt_dat)
		kfree(evt_dat->data);
	kfree(evt_dat);
	mutex_lock(&phba->ct_event_mutex);
	lpfc_ct_event_unref(evt);
	mutex_unlock(&phba->ct_event_mutex);

error_get_event_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);

	return rc;
}

/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 */
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		return lpfc_bsg_set_event(job);

	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		return lpfc_bsg_get_event(job);

	default:
		return -EINVAL;
	}
}

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 */
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc = -EINVAL;

	msgcode = job->request->msgcode;

	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_rport_ct(job);
		break;
	default:
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB.  The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace
 */
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (cmdiocb)
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);

	return 0;
}