/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>
#include <linux/vmalloc.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
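/*
 * One bsg_job_data is allocated per outstanding request and linked from
 * both job->dd_data and the command iocb's context1.  Completion handlers
 * read and clear set_job/job->dd_data under phba->ct_ev_lock to fence off
 * the bsg timeout handler, and LPFC_IO_CMD_OUTSTANDING (manipulated under
 * hbalock) bounds the window in which the timeout handler may still abort
 * the command.
 */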
struct bsg_job_data {
	uint32_t type;
	struct bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};
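
/*
 * Size of the CT request header that precedes the payload union; the
 * NULL-pointer arithmetic below is the classic open-coded equivalent of
 * offsetof(struct lpfc_sli_ct_request, un).
 */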
#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			list_del(&mlast->list);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}
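
/*
 * lpfc_alloc_bsg_buffers - allocate a DMA buffer chain and describe it in a
 * buffer pointer list.  On entry *bpl_entries caps how many BDEs the
 * caller's BPL can hold; on return it holds the number actually used.
 * Each segment is at most LPFC_BPL_SIZE bytes, and outbound_buffers
 * selects the BDE type (BUFF_TYPE_BDE_64 for data the HBA reads,
 * BUFF_TYPE_BDE_64I for receive buffers).  Returns the head of the chain,
 * or NULL on failure (any partially built chain is freed).
 */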
static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffer and place in BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer  */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}
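
/*
 * lpfc_bsg_copy_data - copy between a DMA buffer chain and a bsg
 * scatter/gather payload.  to_buffers != 0 copies sg -> dma (request
 * direction); 0 copies dma -> sg (reply direction).  Returns the number
 * of bytes actually copied, which may be short if either side runs out.
 */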
static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{
	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
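	/*
	 * The iterator runs in SG_MITER_ATOMIC mode because this helper is
	 * also reached from completion context; local interrupts stay off
	 * for the whole walk so each atomically mapped sg page remains
	 * valid until sg_miter_stop().
	 */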
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued by the
 * lpfc_bsg_send_mgmt_cmd function. It is called by the ring event
 * handler function without any lock held. It can be called from both
 * worker thread context and interrupt context, and also from another
 * thread which cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			   struct lpfc_iocbq *cmdiocbq,
			   struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->cmdiocbq->context_un.ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_nlp_put(ndlp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	IOCB_t *cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK)
		return -ENODEV;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_dd;
	}

	cmd = &cmdiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}
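
	/*
	 * Build a GEN_REQUEST64_CR iocb: a single BLP BDE points at the
	 * buffer pointer list in bmp, with the request segments first and
	 * the reply segments immediately after them.
	 */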
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	cmdiocbq->context_un.ndlp = lpfc_nlp_get(ndlp);
	if (!cmdiocbq->context_un.ndlp) {
		rc = -ENODEV;
		goto free_rmp;
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (iocb_stat == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed yet */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (iocb_stat == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	lpfc_nlp_put(ndlp);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
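
/*
 * Usage sketch (illustrative only, not driver code): userspace reaches
 * lpfc_bsg_send_mgmt_cmd() through the FC transport's rport bsg node with
 * an fc_bsg_request whose msgcode is FC_BSG_RPT_CT.  Roughly, assuming
 * bsg_fd is an open descriptor for the rport's bsg node and ct_cmd/ct_rsp
 * are the CT request and response payload buffers:
 *
 *	struct fc_bsg_request req = { .msgcode = FC_BSG_RPT_CT };
 *	struct fc_bsg_reply rsp;
 *	struct sg_io_v4 io = {
 *		.guard = 'Q',
 *		.protocol = BSG_PROTOCOL_SCSI,
 *		.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT,
 *		.request = (uintptr_t)&req,
 *		.request_len = sizeof(req),
 *		.response = (uintptr_t)&rsp,
 *		.max_response_len = sizeof(rsp),
 *		.dout_xferp = (uintptr_t)ct_cmd,
 *		.dout_xfer_len = ct_cmd_len,
 *		.din_xferp = (uintptr_t)ct_rsp,
 *		.din_xfer_len = ct_rsp_len,
 *	};
 *	ioctl(bsg_fd, SG_IO, &io);
 *
 * See include/uapi/scsi/scsi_bsg_fc.h and include/uapi/linux/bsg.h for the
 * interface definitions.
 */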

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued by the
 * lpfc_bsg_rport_els function. It is called by the ring event handler
 * function without any lock held. It can be called from both worker
 * thread context and interrupt context, and also from another thread
 * which cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	rsp = &rspiocbq->iocb;
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else {
			rc = -EIO;
		}
	}

	lpfc_els_free_iocb(phba, cmdiocbq);

	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* Use the DMA buffers allocated by lpfc_prep_els_iocb for both the
	 * command and the response so that, if the job times out and the
	 * request is freed, we won't DMA into memory that is no longer
	 * allocated to the request.
	 */

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	/* Transfer the request payload to allocated command dma buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	rpi = ndlp->nlp_rpi;

	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	cmdiocbq->context1 = lpfc_nlp_get(ndlp);
	if (!cmdiocbq->context1) {
		rc = -EIO;
		goto linkdown_err;
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* I/O issue failed.  Cleanup resources. */

linkdown_err:
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
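
/*
 * Note: this handler is reached for the FC_BSG_RPT_ELS msgcode; the ELS
 * opcode and payload arrive in bsg_request->rqst_data.r_els, and either
 * the raw ELS response or the decoded LS_RJT reason data is returned
 * through lpfc_bsg_rport_els_cmp().
 */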

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt->dd_data);
	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver internal I/O ring.
 * @piocbq: Pointer to the received iocb.
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct bsg_job *job = NULL;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	ct_req = (struct lpfc_sli_ct_request *)bdeBuf1;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
		    evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq x%px\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
							  flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
						     LPFC_SLI_REV2) ||
						    (phba->sli3_options &
						     LPFC_SLI3_HBQ_ENABLED)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}
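
		/*
		 * For SLI4, park the exchange identifiers (rxid/oxid/SID) in
		 * the ct_ctx ring and hand the array index back to the
		 * application in immed_dat; a later SEND_MGMT_RESP uses that
		 * tag in lpfc_issue_ct_rsp() to transmit the response on the
		 * original exchange.
		 */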
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				piocbq->iocb.unsli3.rcvsli3.ox_id;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			bsg_reply = job->reply;
			bsg_reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			bsg_reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			bsg_job_done(job, bsg_reply->result,
				     bsg_reply->reply_payload_rcv_len);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}

/**
 * lpfc_bsg_ct_unsol_abort - handle an abort of a CT command to the management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles the abort of a CT command toward the management
 * plane for an SLI4 port.
 *
 * If a pending context for a CT command to the management plane is present,
 * it clears that context and returns 1 (handled); otherwise, it returns 0,
 * indicating no such context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
		   FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	kfree(dd_data);
	job->dd_data = NULL;
	return rc;
}
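
/*
 * SET_CT_EVENT and GET_CT_EVENT act as a pair: the SET handler above parks
 * a waiter on phba->ct_ev_waiters and returns without completing the job
 * (lpfc_bsg_ct_unsol_event() completes it when an event arrives), while the
 * GET handler below synchronously drains one queued event for the matching
 * ev_reg_id.
 */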

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt, *evt_next;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't any more
	 */
	if (evt_dat == NULL) {
		bsg_reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		bsg_reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	return 0;

job_error:
	job->dd_data = NULL;
	bsg_reply->result = rc;
	return rc;
}

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued by the
 * lpfc_issue_ct_rsp function. It is called by the ring event handler
 * function without any lock held. It can be called from both worker
 * thread context and interrupt context, and also from another thread
 * which cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
		      struct lpfc_iocbq *cmdiocbq,
		      struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed job data or set the error status */

	if (job) {
		bsg_reply = job->reply;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the port's context exchange array.
 * @cmp: Pointer to a cmp dma buffer descriptor.
 * @bmp: Pointer to a bmp dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;

	ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
	if (!ndlp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				"2721 ndlp null for oxid %x SID %x\n",
				phba->ct_ctx[tag].rxid,
				phba->ct_ctx[tag].SID);
		return IOCB_ERROR;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for  command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
					"2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = lpfc_nlp_get(ndlp);
	if (!dd_data->context_un.iocb.ndlp) {
		rc = -IOCB_ERROR;
		goto issue_ct_rsp_exit;
	}
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;
	lpfc_nlp_put(ndlp);

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode_enter - prepare the device for diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag
 * loopback on the device.
 **/
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		pring = &psli->sli3_ring[LPFC_FCP_RING];
		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
		return 0;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring || (pring->ringno != LPFC_FCP_RING))
			continue;
		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
				      &pring->ring_lock))
			break;
	}
	return 0;
}
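
/*
 * Every successful lpfc_bsg_diag_mode_enter() must be paired with
 * lpfc_bsg_diag_mode_exit() below: scsi requests stay blocked on all
 * vports until the exit path unblocks them.
 */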
/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for the driver's exit processing from
 * device diag loopback mode.
 **/
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete then the link is brought down. Once the link
 * is placed in loopback mode, scsi requests are again allowed so the scsi
 * mid-layer doesn't give up on the port.
 * All of this is done in-line.
 **/
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}
				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
 * @phba: Pointer to HBA context object.
 * @diag: Flag to set the link to diagnostic or normal operation state.
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * link to either diag state or normal operation state.
 **/
static int
lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
{
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_mbx_set_link_diag_state *link_diag_state;
	uint32_t req_len, alloc_len;
	int mbxstatus = MBX_SUCCESS, rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		rc = -ENOMEM;
		goto link_diag_state_set_out;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
			diag, phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_no);
	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
	       phba->sli4_hba.lnk_info.lnk_tp);
	if (diag)
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 1);
	else
		bf_set(lpfc_mbx_set_diag_state_diag,
		       &link_diag_state->u.req, 0);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
		rc = 0;
	else
		rc = -ENODEV;

link_diag_state_set_out:
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);

	return rc;
}

/**
 * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
 * @phba: Pointer to HBA context object.
 * @mode: loopback mode to set
 * @link_no: link number for loopback mode to set
 *
 * This function is responsible for issuing a sli4 mailbox command for setting
 * up loopback diagnostic for a link.
 **/
static int
lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
				uint32_t link_no)
{
	LPFC_MBOXQ_t *pmboxq;
	uint32_t req_len, alloc_len;
	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
	int mbxstatus = MBX_SUCCESS, rc = 0;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
		   sizeof(struct lpfc_sli4_cfg_mhdr));
	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
				     req_len, LPFC_SLI4_MBX_EMBED);
	if (alloc_len != req_len) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
	bf_set(lpfc_mbx_set_diag_state_link_num,
	       &link_diag_loopback->u.req, link_no);

	if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
		bf_set(lpfc_mbx_set_diag_state_link_type,
		       &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
	} else {
		bf_set(lpfc_mbx_set_diag_state_link_type,
		       &link_diag_loopback->u.req,
		       phba->sli4_hba.lnk_info.lnk_tp);
	}

	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
	       mode);

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"3127 Failed setup loopback mode mailbox "
				"command, rc:x%x, status:x%x\n", mbxstatus,
				pmboxq->u.mb.mbxStatus);
		rc = -ENODEV;
	}
	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return rc;
}
2011 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
2012 * @phba: Pointer to HBA context object.
2014 * This function sets up SLI4 FC port registrations for a diagnostic run,
2015 * which includes the rpis, vfi, and vpi.
2017 static int
2018 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
2020 int rc;
2022 if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
2023 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2024 "3136 Port still had vfi registered: "
2025 "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
2026 phba->pport->fc_myDID, phba->fcf.fcfi,
2027 phba->sli4_hba.vfi_ids[phba->pport->vfi],
2028 phba->vpi_ids[phba->pport->vpi]);
2029 return -EINVAL;
2031 rc = lpfc_issue_reg_vfi(phba->pport);
2032 return rc;
2036 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
2037 * @phba: Pointer to HBA context object.
2038 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2040 * This function is responsible for placing an sli4 port into diagnostic
2041 * loopback mode in order to perform a diagnostic loopback test.
2043 static int
2044 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
2046 struct fc_bsg_request *bsg_request = job->request;
2047 struct fc_bsg_reply *bsg_reply = job->reply;
2048 struct diag_mode_set *loopback_mode;
2049 uint32_t link_flags, timeout, link_no;
2050 int i, rc = 0;
2052 /* no data to return, just the return code */
2053 bsg_reply->reply_payload_rcv_len = 0;
2055 if (job->request_len < sizeof(struct fc_bsg_request) +
2056 sizeof(struct diag_mode_set)) {
2057 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2058 "3011 Received DIAG MODE request size:%d "
2059 "below the minimum size:%d\n",
2060 job->request_len,
2061 (int)(sizeof(struct fc_bsg_request) +
2062 sizeof(struct diag_mode_set)));
2063 rc = -EINVAL;
2064 goto job_done;
2067 loopback_mode = (struct diag_mode_set *)
2068 bsg_request->rqst_data.h_vendor.vendor_cmd;
2069 link_flags = loopback_mode->type;
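/* the app passes timeout in seconds; convert it to the 10 ms poll ticks used by the wait loops below */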
2070 timeout = loopback_mode->timeout * 100;
2072 if (loopback_mode->physical_link == -1)
2073 link_no = phba->sli4_hba.lnk_info.lnk_no;
2074 else
2075 link_no = loopback_mode->physical_link;
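/*
* conf_trunk keeps two bitmaps: bits 0-3 mark links that belong to the
* configured trunk, and the same bit shifted left by 4 marks a link whose
* loopback mode still needs to be disabled
*/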
2077 if (link_flags == DISABLE_LOOP_BACK) {
2078 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2079 LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
2080 link_no);
2081 if (!rc) {
2082 /* Unset the need disable bit */
2083 phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
2085 goto job_done;
2086 } else {
2087 /* Check if we need to disable the loopback state */
2088 if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
2089 rc = -EPERM;
2090 goto job_done;
2094 rc = lpfc_bsg_diag_mode_enter(phba);
2095 if (rc)
2096 goto job_done;
2098 /* indicate we are in loopback diagnostic mode */
2099 spin_lock_irq(&phba->hbalock);
2100 phba->link_flag |= LS_LOOPBACK_MODE;
2101 spin_unlock_irq(&phba->hbalock);
2103 /* reset port to start from scratch */
2104 rc = lpfc_selective_reset(phba);
2105 if (rc)
2106 goto job_done;
2108 /* bring the link to diagnostic mode */
2109 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2110 "3129 Bring link to diagnostic state.\n");
2112 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2113 if (rc) {
2114 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2115 "3130 Failed to bring link to diagnostic "
2116 "state, rc:x%x\n", rc);
2117 goto loopback_mode_exit;
2120 /* wait for link down before proceeding */
2121 i = 0;
2122 while (phba->link_state != LPFC_LINK_DOWN) {
2123 if (i++ > timeout) {
2124 rc = -ETIMEDOUT;
2125 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2126 "3131 Timeout waiting for link to "
2127 "diagnostic mode, timeout:%d ms\n",
2128 timeout * 10);
2129 goto loopback_mode_exit;
2131 msleep(10);
2134 /* set up loopback mode */
2135 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2136 "3132 Set up loopback mode:x%x\n", link_flags);
2138 switch (link_flags) {
2139 case INTERNAL_LOOP_BACK:
2140 if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2141 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2142 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2143 link_no);
2144 } else {
2145 /* Trunk is configured, but link is not in this trunk */
2146 if (phba->sli4_hba.conf_trunk) {
2147 rc = -ELNRNG;
2148 goto loopback_mode_exit;
2151 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2152 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2153 link_no);
2156 if (!rc) {
2157 /* Set the need disable bit */
2158 phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2161 break;
2162 case EXTERNAL_LOOP_BACK:
2163 if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2164 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2165 LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
2166 link_no);
2167 } else {
2168 /* Trunk is configured, but link is not in this trunk */
2169 if (phba->sli4_hba.conf_trunk) {
2170 rc = -ELNRNG;
2171 goto loopback_mode_exit;
2174 rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2175 LPFC_DIAG_LOOPBACK_TYPE_SERDES,
2176 link_no);
2179 if (!rc) {
2180 /* Set the need disable bit */
2181 phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2184 break;
2185 default:
2186 rc = -EINVAL;
2187 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2188 "3141 Loopback mode:x%x not supported\n",
2189 link_flags);
2190 goto loopback_mode_exit;
2193 if (!rc) {
2194 /* wait for the link attention interrupt */
2195 msleep(100);
2196 i = 0;
2197 while (phba->link_state < LPFC_LINK_UP) {
2198 if (i++ > timeout) {
2199 rc = -ETIMEDOUT;
2200 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2201 "3137 Timeout waiting for link up "
2202 "in loopback mode, timeout:%d ms\n",
2203 timeout * 10);
2204 break;
2206 msleep(10);
2210 /* port resource registration setup for loopback diagnostic */
2211 if (!rc) {
2212 /* set up a non-zero myDID for the loopback test */
2213 phba->pport->fc_myDID = 1;
2214 rc = lpfc_sli4_diag_fcport_reg_setup(phba);
2215 } else
2216 goto loopback_mode_exit;
2218 if (!rc) {
2219 /* wait for the port to be ready */
2220 msleep(100);
2221 i = 0;
2222 while (phba->link_state != LPFC_HBA_READY) {
2223 if (i++ > timeout) {
2224 rc = -ETIMEDOUT;
2225 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2226 "3133 Timeout waiting for port "
2227 "loopback mode ready, timeout:%d ms\n",
2228 timeout * 10);
2229 break;
2231 msleep(10);
2235 loopback_mode_exit:
2236 /* clear loopback diagnostic mode */
2237 if (rc) {
2238 spin_lock_irq(&phba->hbalock);
2239 phba->link_flag &= ~LS_LOOPBACK_MODE;
2240 spin_unlock_irq(&phba->hbalock);
2242 lpfc_bsg_diag_mode_exit(phba);
2244 job_done:
2245 /* make error code available to userspace */
2246 bsg_reply->result = rc;
2247 /* complete the job back to userspace if no error */
2248 if (rc == 0)
2249 bsg_job_done(job, bsg_reply->result,
2250 bsg_reply->reply_payload_rcv_len);
2251 return rc;
2255 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2256 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2258 * This function checks and dispatches the bsg diag loopback mode command
2259 * from the user to the proper driver action routine.
2261 static int
2262 lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2264 struct Scsi_Host *shost;
2265 struct lpfc_vport *vport;
2266 struct lpfc_hba *phba;
2267 int rc;
2269 shost = fc_bsg_to_shost(job);
2270 if (!shost)
2271 return -ENODEV;
2272 vport = shost_priv(shost);
2273 if (!vport)
2274 return -ENODEV;
2275 phba = vport->phba;
2276 if (!phba)
2277 return -ENODEV;
2279 if (phba->sli_rev < LPFC_SLI_REV4)
2280 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2281 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2282 LPFC_SLI_INTF_IF_TYPE_2)
2283 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2284 else
2285 rc = -ENODEV;
2287 return rc;
2291 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2292 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2294 * This function checks and dispatches the bsg diag mode end command
2295 * from the user to the proper driver action routine.
2297 static int
2298 lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
2300 struct fc_bsg_request *bsg_request = job->request;
2301 struct fc_bsg_reply *bsg_reply = job->reply;
2302 struct Scsi_Host *shost;
2303 struct lpfc_vport *vport;
2304 struct lpfc_hba *phba;
2305 struct diag_mode_set *loopback_mode_end_cmd;
2306 uint32_t timeout;
2307 int rc, i;
2309 shost = fc_bsg_to_shost(job);
2310 if (!shost)
2311 return -ENODEV;
2312 vport = shost_priv(shost);
2313 if (!vport)
2314 return -ENODEV;
2315 phba = vport->phba;
2316 if (!phba)
2317 return -ENODEV;
2319 if (phba->sli_rev < LPFC_SLI_REV4)
2320 return -ENODEV;
2321 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2322 LPFC_SLI_INTF_IF_TYPE_2)
2323 return -ENODEV;
2325 /* clear loopback diagnostic mode */
2326 spin_lock_irq(&phba->hbalock);
2327 phba->link_flag &= ~LS_LOOPBACK_MODE;
2328 spin_unlock_irq(&phba->hbalock);
2329 loopback_mode_end_cmd = (struct diag_mode_set *)
2330 bsg_request->rqst_data.h_vendor.vendor_cmd;
2331 timeout = loopback_mode_end_cmd->timeout * 100;
2333 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2334 if (rc) {
2335 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2336 "3139 Failed to bring link to diagnostic "
2337 "state, rc:x%x\n", rc);
2338 goto loopback_mode_end_exit;
2341 /* wait for link down before proceeding */
2342 i = 0;
2343 while (phba->link_state != LPFC_LINK_DOWN) {
2344 if (i++ > timeout) {
2345 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2346 "3140 Timeout waiting for link to "
2347 "diagnostic mode_end, timeout:%d ms\n",
2348 timeout * 10);
2349 /* there is not much we can do here */
2350 break;
2352 msleep(10);
2355 /* reset port resource registrations */
2356 rc = lpfc_selective_reset(phba);
2357 phba->pport->fc_myDID = 0;
2359 loopback_mode_end_exit:
2360 /* make return code available to userspace */
2361 bsg_reply->result = rc;
2362 /* complete the job back to userspace if no error */
2363 if (rc == 0)
2364 bsg_job_done(job, bsg_reply->result,
2365 bsg_reply->reply_payload_rcv_len);
2366 return rc;
2370 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2371 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2373 * This function performs an SLI4 diag link test as requested by the user
2374 * application.
2376 static int
2377 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2379 struct fc_bsg_request *bsg_request = job->request;
2380 struct fc_bsg_reply *bsg_reply = job->reply;
2381 struct Scsi_Host *shost;
2382 struct lpfc_vport *vport;
2383 struct lpfc_hba *phba;
2384 LPFC_MBOXQ_t *pmboxq;
2385 struct sli4_link_diag *link_diag_test_cmd;
2386 uint32_t req_len, alloc_len;
2387 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2388 union lpfc_sli4_cfg_shdr *shdr;
2389 uint32_t shdr_status, shdr_add_status;
2390 struct diag_status *diag_status_reply;
2391 int mbxstatus, rc = -ENODEV, rc1 = 0;
2393 shost = fc_bsg_to_shost(job);
2394 if (!shost)
2395 goto job_error;
2397 vport = shost_priv(shost);
2398 if (!vport)
2399 goto job_error;
2401 phba = vport->phba;
2402 if (!phba)
2403 goto job_error;
2406 if (phba->sli_rev < LPFC_SLI_REV4)
2407 goto job_error;
2409 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2410 LPFC_SLI_INTF_IF_TYPE_2)
2411 goto job_error;
2413 if (job->request_len < sizeof(struct fc_bsg_request) +
2414 sizeof(struct sli4_link_diag)) {
2415 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2416 "3013 Received LINK DIAG TEST request "
2417 " size:%d below the minimum size:%d\n",
2418 job->request_len,
2419 (int)(sizeof(struct fc_bsg_request) +
2420 sizeof(struct sli4_link_diag)));
2421 rc = -EINVAL;
2422 goto job_error;
2425 rc = lpfc_bsg_diag_mode_enter(phba);
2426 if (rc)
2427 goto job_error;
2429 link_diag_test_cmd = (struct sli4_link_diag *)
2430 bsg_request->rqst_data.h_vendor.vendor_cmd;
2432 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2434 if (rc)
2435 goto job_error;
2437 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2438 if (!pmboxq)
2439 goto link_diag_test_exit;
2441 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2442 sizeof(struct lpfc_sli4_cfg_mhdr));
2443 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2444 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2445 req_len, LPFC_SLI4_MBX_EMBED);
2446 if (alloc_len != req_len) {
2447 rc = -ENOMEM;
2448 goto link_diag_test_exit;
2451 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2452 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2453 phba->sli4_hba.lnk_info.lnk_no);
2454 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2455 phba->sli4_hba.lnk_info.lnk_tp);
2456 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2457 link_diag_test_cmd->test_id);
2458 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2459 link_diag_test_cmd->loops);
2460 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2461 link_diag_test_cmd->test_version);
2462 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2463 link_diag_test_cmd->error_action);
2465 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2467 shdr = (union lpfc_sli4_cfg_shdr *)
2468 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2469 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2470 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2471 if (shdr_status || shdr_add_status || mbxstatus) {
2472 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2473 "3010 Run link diag test mailbox failed with "
2474 "mbx_status x%x status x%x, add_status x%x\n",
2475 mbxstatus, shdr_status, shdr_add_status);
2478 diag_status_reply = (struct diag_status *)
2479 bsg_reply->reply_data.vendor_reply.vendor_rsp;
2481 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
2482 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2483 "3012 Received Run link diag test reply "
2484 "below minimum size (%d): reply_len:%d\n",
2485 (int)(sizeof(*bsg_reply) +
2486 sizeof(*diag_status_reply)),
2487 job->reply_len);
2488 rc = -EINVAL;
2489 goto job_error;
2492 diag_status_reply->mbox_status = mbxstatus;
2493 diag_status_reply->shdr_status = shdr_status;
2494 diag_status_reply->shdr_add_status = shdr_add_status;
2496 link_diag_test_exit:
2497 rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2499 if (pmboxq)
2500 mempool_free(pmboxq, phba->mbox_mem_pool);
2502 lpfc_bsg_diag_mode_exit(phba);
2504 job_error:
2505 /* make error code available to userspace */
2506 if (rc1 && !rc)
2507 rc = rc1;
2508 bsg_reply->result = rc;
2509 /* complete the job back to userspace if no error */
2510 if (rc == 0)
2511 bsg_job_done(job, bsg_reply->result,
2512 bsg_reply->reply_payload_rcv_len);
2513 return rc;
2517 * lpfcdiag_loop_self_reg - obtains a remote port login id
2518 * @phba: Pointer to HBA context object
2519 * @rpi: Pointer to a remote port login id
2521 * This function obtains a remote port login id so the diag loopback test
2522 * can send and receive its own unsolicited CT command.
2524 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2526 LPFC_MBOXQ_t *mbox;
2527 struct lpfc_dmabuf *dmabuff;
2528 int status;
2530 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2531 if (!mbox)
2532 return -ENOMEM;
2534 if (phba->sli_rev < LPFC_SLI_REV4)
2535 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2536 (uint8_t *)&phba->pport->fc_sparam,
2537 mbox, *rpi);
2538 else {
2539 *rpi = lpfc_sli4_alloc_rpi(phba);
2540 if (*rpi == LPFC_RPI_ALLOC_ERROR) {
2541 mempool_free(mbox, phba->mbox_mem_pool);
2542 return -EBUSY;
2544 status = lpfc_reg_rpi(phba, phba->pport->vpi,
2545 phba->pport->fc_myDID,
2546 (uint8_t *)&phba->pport->fc_sparam,
2547 mbox, *rpi);
2550 if (status) {
2551 mempool_free(mbox, phba->mbox_mem_pool);
2552 if (phba->sli_rev == LPFC_SLI_REV4)
2553 lpfc_sli4_free_rpi(phba, *rpi);
2554 return -ENOMEM;
2557 dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
2558 mbox->ctx_buf = NULL;
2559 mbox->ctx_ndlp = NULL;
2560 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2562 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2563 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2564 kfree(dmabuff);
2565 if (status != MBX_TIMEOUT)
2566 mempool_free(mbox, phba->mbox_mem_pool);
2567 if (phba->sli_rev == LPFC_SLI_REV4)
2568 lpfc_sli4_free_rpi(phba, *rpi);
2569 return -ENODEV;
2572 if (phba->sli_rev < LPFC_SLI_REV4)
2573 *rpi = mbox->u.mb.un.varWords[0];
2575 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2576 kfree(dmabuff);
2577 mempool_free(mbox, phba->mbox_mem_pool);
2578 return 0;
2582 * lpfcdiag_loop_self_unreg - unregisters the rpi
2583 * @phba: Pointer to HBA context object
2584 * @rpi: Remote port login id
2586 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2588 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2590 LPFC_MBOXQ_t *mbox;
2591 int status;
2593 /* Allocate mboxq structure */
2594 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2595 if (mbox == NULL)
2596 return -ENOMEM;
2598 if (phba->sli_rev < LPFC_SLI_REV4)
2599 lpfc_unreg_login(phba, 0, rpi, mbox);
2600 else
2601 lpfc_unreg_login(phba, phba->pport->vpi,
2602 phba->sli4_hba.rpi_ids[rpi], mbox);
2604 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2606 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2607 if (status != MBX_TIMEOUT)
2608 mempool_free(mbox, phba->mbox_mem_pool);
2609 return -EIO;
2611 mempool_free(mbox, phba->mbox_mem_pool);
2612 if (phba->sli_rev == LPFC_SLI_REV4)
2613 lpfc_sli4_free_rpi(phba, rpi);
2614 return 0;
2618 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2619 * @phba: Pointer to HBA context object
2620 * @rpi: Remote port login id
2621 * @txxri: Pointer to transmit exchange id
2622 * @rxxri: Pointer to response exchange id
2624 * This function obtains the transmit and receive ids required to send
2625 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
2626 * flags are used so the unsolicited response handler is able to process
2627 * the ct command sent on the same port.
2629 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2630 uint16_t *txxri, uint16_t *rxxri)
2632 struct lpfc_bsg_event *evt;
2633 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2634 IOCB_t *cmd, *rsp;
2635 struct lpfc_dmabuf *dmabuf;
2636 struct ulp_bde64 *bpl = NULL;
2637 struct lpfc_sli_ct_request *ctreq = NULL;
2638 int ret_val = 0;
2639 int time_left;
2640 int iocb_stat = IOCB_SUCCESS;
2641 unsigned long flags;
2643 *txxri = 0;
2644 *rxxri = 0;
2645 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2646 SLI_CT_ELX_LOOPBACK);
2647 if (!evt)
2648 return -ENOMEM;
2650 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2651 list_add(&evt->node, &phba->ct_ev_waiters);
2652 lpfc_bsg_event_ref(evt);
2653 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2655 cmdiocbq = lpfc_sli_get_iocbq(phba);
2656 rspiocbq = lpfc_sli_get_iocbq(phba);
2658 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2659 if (dmabuf) {
2660 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2661 if (dmabuf->virt) {
2662 INIT_LIST_HEAD(&dmabuf->list);
2663 bpl = (struct ulp_bde64 *) dmabuf->virt;
2664 memset(bpl, 0, sizeof(*bpl));
2665 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2666 bpl->addrHigh =
2667 le32_to_cpu(putPaddrHigh(dmabuf->phys +
2668 sizeof(*bpl)));
2669 bpl->addrLow =
2670 le32_to_cpu(putPaddrLow(dmabuf->phys +
2671 sizeof(*bpl)));
2672 bpl->tus.f.bdeFlags = 0;
2673 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2674 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2678 if (cmdiocbq == NULL || rspiocbq == NULL ||
2679 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2680 dmabuf->virt == NULL) {
2681 ret_val = -ENOMEM;
2682 goto err_get_xri_exit;
2685 cmd = &cmdiocbq->iocb;
2686 rsp = &rspiocbq->iocb;
2688 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2690 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2691 ctreq->RevisionId.bits.InId = 0;
2692 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2693 ctreq->FsSubType = 0;
2694 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2695 ctreq->CommandResponse.bits.Size = 0;
2698 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2699 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2700 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2701 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2703 cmd->un.xseq64.w5.hcsw.Fctl = LA;
2704 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2705 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2706 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2708 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2709 cmd->ulpBdeCount = 1;
2710 cmd->ulpLe = 1;
2711 cmd->ulpClass = CLASS3;
2712 cmd->ulpContext = rpi;
2714 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2715 cmdiocbq->vport = phba->pport;
2716 cmdiocbq->iocb_cmpl = NULL;
2718 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2719 rspiocbq,
2720 (phba->fc_ratov * 2)
2721 + LPFC_DRVR_TIMEOUT);
2722 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
2723 ret_val = -EIO;
2724 goto err_get_xri_exit;
2726 *txxri = rsp->ulpContext;
2728 evt->waiting = 1;
2729 evt->wait_time_stamp = jiffies;
2730 time_left = wait_event_interruptible_timeout(
2731 evt->wq, !list_empty(&evt->events_to_see),
2732 msecs_to_jiffies(1000 *
2733 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
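/*
* a zero time_left means the wait timed out; a nonzero time_left with no
* event queued means the wait was interrupted
*/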
2734 if (list_empty(&evt->events_to_see))
2735 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2736 else {
2737 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2738 list_move(evt->events_to_see.prev, &evt->events_to_get);
2739 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
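/* the unsolicited CT event handler delivers the rx exchange id in immed_dat */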
2740 *rxxri = (list_entry(evt->events_to_get.prev,
2741 typeof(struct event_data),
2742 node))->immed_dat;
2744 evt->waiting = 0;
2746 err_get_xri_exit:
2747 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2748 lpfc_bsg_event_unref(evt); /* release ref */
2749 lpfc_bsg_event_unref(evt); /* delete */
2750 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2752 if (dmabuf) {
2753 if (dmabuf->virt)
2754 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2755 kfree(dmabuf);
2758 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2759 lpfc_sli_release_iocbq(phba, cmdiocbq);
2760 if (rspiocbq)
2761 lpfc_sli_release_iocbq(phba, rspiocbq);
2762 return ret_val;
2766 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page-sized dma buffer
2767 * @phba: Pointer to HBA context object
2769 * This function allocates a BSG_MBOX_SIZE (4KB) page-sized dma buffer and
2770 * returns a pointer to the buffer.
2772 static struct lpfc_dmabuf *
2773 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2775 struct lpfc_dmabuf *dmabuf;
2776 struct pci_dev *pcidev = phba->pcidev;
2778 /* allocate dma buffer struct */
2779 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2780 if (!dmabuf)
2781 return NULL;
2783 INIT_LIST_HEAD(&dmabuf->list);
2785 /* now, allocate dma buffer */
2786 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2787 &(dmabuf->phys), GFP_KERNEL);
2789 if (!dmabuf->virt) {
2790 kfree(dmabuf);
2791 return NULL;
2794 return dmabuf;
2798 * lpfc_bsg_dma_page_free - free a bsg mbox page-sized dma buffer
2799 * @phba: Pointer to HBA context object.
2800 * @dmabuf: Pointer to the bsg mbox page-sized dma buffer descriptor.
2802 * This routine frees a dma buffer and the associated buffer
2803 * descriptor referred to by @dmabuf.
2805 static void
2806 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2808 struct pci_dev *pcidev = phba->pcidev;
2810 if (!dmabuf)
2811 return;
2813 if (dmabuf->virt)
2814 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2815 dmabuf->virt, dmabuf->phys);
2816 kfree(dmabuf);
2817 return;
2821 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page-sized dma buffers
2822 * @phba: Pointer to HBA context object.
2823 * @dmabuf_list: Pointer to a list of bsg mbox page-sized dma buffer descs.
2825 * This routine frees all dma buffers and the associated buffer
2826 * descriptors referred to by @dmabuf_list.
2828 static void
2829 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2830 struct list_head *dmabuf_list)
2832 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2834 if (list_empty(dmabuf_list))
2835 return;
2837 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2838 list_del_init(&dmabuf->list);
2839 lpfc_bsg_dma_page_free(phba, dmabuf);
2841 return;
2845 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2846 * @phba: Pointer to HBA context object
2847 * @bpl: Pointer to 64 bit bde structure
2848 * @size: Number of bytes to process
2849 * @nocopydata: Flag to skip zero-initializing the buffers for a user data copy
2851 * This function allocates page-sized buffers and populates an lpfc_dmabufext.
2852 * Unless @nocopydata is set, each buffer is zeroed so user data can be copied
2853 * into kernel memory. The chained list of page-sized buffers is returned.
2855 static struct lpfc_dmabufext *
2856 diag_cmd_data_alloc(struct lpfc_hba *phba,
2857 struct ulp_bde64 *bpl, uint32_t size,
2858 int nocopydata)
2860 struct lpfc_dmabufext *mlist = NULL;
2861 struct lpfc_dmabufext *dmp;
2862 int cnt, offset = 0, i = 0;
2863 struct pci_dev *pcidev;
2865 pcidev = phba->pcidev;
2867 while (size) {
2868 /* We get chunks of 4K */
2869 if (size > BUF_SZ_4K)
2870 cnt = BUF_SZ_4K;
2871 else
2872 cnt = size;
2874 /* allocate struct lpfc_dmabufext buffer header */
2875 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2876 if (!dmp)
2877 goto out;
2879 INIT_LIST_HEAD(&dmp->dma.list);
2881 /* Queue it to a linked list */
2882 if (mlist)
2883 list_add_tail(&dmp->dma.list, &mlist->dma.list);
2884 else
2885 mlist = dmp;
2887 /* allocate buffer */
2888 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2889 cnt,
2890 &(dmp->dma.phys),
2891 GFP_KERNEL);
2893 if (!dmp->dma.virt)
2894 goto out;
2896 dmp->size = cnt;
2898 if (nocopydata) {
2899 bpl->tus.f.bdeFlags = 0;
2900 } else {
2901 memset((uint8_t *)dmp->dma.virt, 0, cnt);
2902 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2905 /* build buffer ptr list for IOCB */
2906 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2907 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2908 bpl->tus.f.bdeSize = (ushort) cnt;
2909 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2910 bpl++;
2912 i++;
2913 offset += cnt;
2914 size -= cnt;
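/* record the number of BDE entries built; callers read it back via ->flag */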
2917 if (mlist) {
2918 mlist->flag = i;
2919 return mlist;
2921 out:
2922 diag_cmd_data_free(phba, mlist);
2923 return NULL;
2927 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2928 * @phba: Pointer to HBA context object
2929 * @rxxri: Receive exchange id
2930 * @len: Number of data bytes
2932 * This function allocates and posts data buffers of sufficient size to receive
2933 * an unsolicited CT command.
2935 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2936 size_t len)
2938 struct lpfc_sli_ring *pring;
2939 struct lpfc_iocbq *cmdiocbq;
2940 IOCB_t *cmd = NULL;
2941 struct list_head head, *curr, *next;
2942 struct lpfc_dmabuf *rxbmp;
2943 struct lpfc_dmabuf *dmp;
2944 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2945 struct ulp_bde64 *rxbpl = NULL;
2946 uint32_t num_bde;
2947 struct lpfc_dmabufext *rxbuffer = NULL;
2948 int ret_val = 0;
2949 int iocb_stat;
2950 int i = 0;
2952 pring = lpfc_phba_elsring(phba);
2954 cmdiocbq = lpfc_sli_get_iocbq(phba);
2955 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2956 if (rxbmp != NULL) {
2957 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2958 if (rxbmp->virt) {
2959 INIT_LIST_HEAD(&rxbmp->list);
2960 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2961 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2965 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
2966 ret_val = -ENOMEM;
2967 goto err_post_rxbufs_exit;
2970 /* Queue buffers for the receive exchange */
2971 num_bde = (uint32_t)rxbuffer->flag;
2972 dmp = &rxbuffer->dma;
2974 cmd = &cmdiocbq->iocb;
2975 i = 0;
2977 INIT_LIST_HEAD(&head);
2978 list_add_tail(&head, &dmp->list);
2979 list_for_each_safe(curr, next, &head) {
2980 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2981 list_del(curr);
2983 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2984 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2985 cmd->un.quexri64cx.buff.bde.addrHigh =
2986 putPaddrHigh(mp[i]->phys);
2987 cmd->un.quexri64cx.buff.bde.addrLow =
2988 putPaddrLow(mp[i]->phys);
2989 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2990 ((struct lpfc_dmabufext *)mp[i])->size;
2991 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2992 cmd->ulpCommand = CMD_QUE_XRI64_CX;
2993 cmd->ulpPU = 0;
2994 cmd->ulpLe = 1;
2995 cmd->ulpBdeCount = 1;
2996 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2998 } else {
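/* non-HBQ path: one QUE_XRI_BUF64_CX iocb carries up to two cont64 buffers */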
2999 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
3000 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
3001 cmd->un.cont64[i].tus.f.bdeSize =
3002 ((struct lpfc_dmabufext *)mp[i])->size;
3003 cmd->ulpBdeCount = ++i;
3005 if ((--num_bde > 0) && (i < 2))
3006 continue;
3008 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
3009 cmd->ulpLe = 1;
3012 cmd->ulpClass = CLASS3;
3013 cmd->ulpContext = rxxri;
3015 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
3017 if (iocb_stat == IOCB_ERROR) {
3018 diag_cmd_data_free(phba,
3019 (struct lpfc_dmabufext *)mp[0]);
3020 if (mp[1])
3021 diag_cmd_data_free(phba,
3022 (struct lpfc_dmabufext *)mp[1]);
3023 dmp = list_entry(next, struct lpfc_dmabuf, list);
3024 ret_val = -EIO;
3025 goto err_post_rxbufs_exit;
3028 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
3029 if (mp[1]) {
3030 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
3031 mp[1] = NULL;
3034 /* The iocb was freed by lpfc_sli_issue_iocb */
3035 cmdiocbq = lpfc_sli_get_iocbq(phba);
3036 if (!cmdiocbq) {
3037 dmp = list_entry(next, struct lpfc_dmabuf, list);
3038 ret_val = -EIO;
3039 goto err_post_rxbufs_exit;
3042 cmd = &cmdiocbq->iocb;
3043 i = 0;
3045 list_del(&head);
3047 err_post_rxbufs_exit:
3049 if (rxbmp) {
3050 if (rxbmp->virt)
3051 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
3052 kfree(rxbmp);
3055 if (cmdiocbq)
3056 lpfc_sli_release_iocbq(phba, cmdiocbq);
3057 return ret_val;
3061 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
3062 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
3064 * This function receives a user data buffer to be transmitted and received on
3065 * the same port, the link must be up and in loopback mode prior
3066 * to being called.
3067 * 1. A kernel buffer is allocated to copy the user data into.
3068 * 2. The port registers with "itself".
3069 * 3. The transmit and receive exchange ids are obtained.
3070 * 4. The receive exchange id is posted.
3071 * 5. A new els loopback event is created.
3072 * 6. The command and response iocbs are allocated.
3073 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
3075 * This function is meant to be called n times while the port is in loopback
3076 * so it is the app's responsibility to issue a reset to take the port out
3077 * of loopback mode.
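* Illustrative userspace flow (a sketch, not a supported API contract):
* enter loopback with LPFC_BSG_VENDOR_DIAG_MODE, issue one or more
* LPFC_BSG_VENDOR_DIAG_TEST requests carrying the payload to echo, then
* reset the port to leave loopback mode.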
3079 static int
3080 lpfc_bsg_diag_loopback_run(struct bsg_job *job)
3082 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3083 struct fc_bsg_reply *bsg_reply = job->reply;
3084 struct lpfc_hba *phba = vport->phba;
3085 struct lpfc_bsg_event *evt;
3086 struct event_data *evdat;
3087 struct lpfc_sli *psli = &phba->sli;
3088 uint32_t size;
3089 uint32_t full_size;
3090 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
3091 uint16_t rpi = 0;
3092 struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
3093 IOCB_t *cmd, *rsp = NULL;
3094 struct lpfc_sli_ct_request *ctreq;
3095 struct lpfc_dmabuf *txbmp;
3096 struct ulp_bde64 *txbpl = NULL;
3097 struct lpfc_dmabufext *txbuffer = NULL;
3098 struct list_head head;
3099 struct lpfc_dmabuf *curr;
3100 uint16_t txxri = 0, rxxri;
3101 uint32_t num_bde;
3102 uint8_t *ptr = NULL, *rx_databuf = NULL;
3103 int rc = 0;
3104 int time_left;
3105 int iocb_stat = IOCB_SUCCESS;
3106 unsigned long flags;
3107 void *dataout = NULL;
3108 uint32_t total_mem;
3110 /* in case no data is returned, return just the return code */
3111 bsg_reply->reply_payload_rcv_len = 0;
3113 if (job->request_len <
3114 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
3115 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3116 "2739 Received DIAG TEST request below minimum "
3117 "size\n");
3118 rc = -EINVAL;
3119 goto loopback_test_exit;
3122 if (job->request_payload.payload_len !=
3123 job->reply_payload.payload_len) {
3124 rc = -EINVAL;
3125 goto loopback_test_exit;
3128 if ((phba->link_state == LPFC_HBA_ERROR) ||
3129 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
3130 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
3131 rc = -EACCES;
3132 goto loopback_test_exit;
3135 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
3136 rc = -EACCES;
3137 goto loopback_test_exit;
3140 size = job->request_payload.payload_len;
3141 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
3143 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
3144 rc = -ERANGE;
3145 goto loopback_test_exit;
3148 if (full_size >= BUF_SZ_4K) {
3150 * Allocate memory for ioctl data. If buffer is bigger than 64k,
3151 * then we allocate 64k and re-use that buffer over and over to
3152 * xfer the whole block. This is because the Linux kernel has a
3153 * problem allocating more than 120k of kernel space memory. Saw
3154 * problem with GET_FCPTARGETMAPPING...
3156 if (size <= (64 * 1024))
3157 total_mem = full_size;
3158 else
3159 total_mem = 64 * 1024;
3160 } else
3161 /* Allocate memory for ioctl data */
3162 total_mem = BUF_SZ_4K;
3164 dataout = kmalloc(total_mem, GFP_KERNEL);
3165 if (dataout == NULL) {
3166 rc = -ENOMEM;
3167 goto loopback_test_exit;
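/*
* leave room at the front of the buffer for the ELX loopback header;
* the user payload is copied in right after it
*/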
3170 ptr = dataout;
3171 ptr += ELX_LOOPBACK_HEADER_SZ;
3172 sg_copy_to_buffer(job->request_payload.sg_list,
3173 job->request_payload.sg_cnt,
3174 ptr, size);
3175 rc = lpfcdiag_loop_self_reg(phba, &rpi);
3176 if (rc)
3177 goto loopback_test_exit;
3179 if (phba->sli_rev < LPFC_SLI_REV4) {
3180 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
3181 if (rc) {
3182 lpfcdiag_loop_self_unreg(phba, rpi);
3183 goto loopback_test_exit;
3186 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
3187 if (rc) {
3188 lpfcdiag_loop_self_unreg(phba, rpi);
3189 goto loopback_test_exit;
3192 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
3193 SLI_CT_ELX_LOOPBACK);
3194 if (!evt) {
3195 lpfcdiag_loop_self_unreg(phba, rpi);
3196 rc = -ENOMEM;
3197 goto loopback_test_exit;
3200 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3201 list_add(&evt->node, &phba->ct_ev_waiters);
3202 lpfc_bsg_event_ref(evt);
3203 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3205 cmdiocbq = lpfc_sli_get_iocbq(phba);
3206 if (phba->sli_rev < LPFC_SLI_REV4)
3207 rspiocbq = lpfc_sli_get_iocbq(phba);
3208 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3210 if (txbmp) {
3211 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
3212 if (txbmp->virt) {
3213 INIT_LIST_HEAD(&txbmp->list);
3214 txbpl = (struct ulp_bde64 *) txbmp->virt;
3215 txbuffer = diag_cmd_data_alloc(phba,
3216 txbpl, full_size, 0);
3220 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
3221 rc = -ENOMEM;
3222 goto err_loopback_test_exit;
3224 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
3225 rc = -ENOMEM;
3226 goto err_loopback_test_exit;
3229 cmd = &cmdiocbq->iocb;
3230 if (phba->sli_rev < LPFC_SLI_REV4)
3231 rsp = &rspiocbq->iocb;
3233 INIT_LIST_HEAD(&head);
3234 list_add_tail(&head, &txbuffer->dma.list);
3235 list_for_each_entry(curr, &head, list) {
3236 segment_len = ((struct lpfc_dmabufext *)curr)->size;
3237 if (current_offset == 0) {
3238 ctreq = curr->virt;
3239 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
3240 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
3241 ctreq->RevisionId.bits.InId = 0;
3242 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
3243 ctreq->FsSubType = 0;
3244 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
3245 ctreq->CommandResponse.bits.Size = size;
3246 segment_offset = ELX_LOOPBACK_HEADER_SZ;
3247 } else
3248 segment_offset = 0;
3250 BUG_ON(segment_offset >= segment_len);
3251 memcpy(curr->virt + segment_offset,
3252 ptr + current_offset,
3253 segment_len - segment_offset);
3255 current_offset += segment_len - segment_offset;
3256 BUG_ON(current_offset > size);
3258 list_del(&head);
3260 /* Build the XMIT_SEQUENCE iocb */
3261 num_bde = (uint32_t)txbuffer->flag;
3263 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
3264 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
3265 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
3266 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
3268 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
3269 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
3270 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
3271 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
3273 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
3274 cmd->ulpBdeCount = 1;
3275 cmd->ulpLe = 1;
3276 cmd->ulpClass = CLASS3;
3278 if (phba->sli_rev < LPFC_SLI_REV4) {
3279 cmd->ulpContext = txxri;
3280 } else {
3281 cmd->un.xseq64.bdl.ulpIoTag32 = 0;
3282 cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
3283 cmdiocbq->context3 = txbmp;
3284 cmdiocbq->sli4_xritag = NO_XRI;
3285 cmd->unsli3.rcvsli3.ox_id = 0xffff;
3287 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3288 cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
3289 cmdiocbq->vport = phba->pport;
3290 cmdiocbq->iocb_cmpl = NULL;
3291 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3292 rspiocbq, (phba->fc_ratov * 2) +
3293 LPFC_DRVR_TIMEOUT);
3295 if ((iocb_stat != IOCB_SUCCESS) ||
3296 ((phba->sli_rev < LPFC_SLI_REV4) &&
3297 (rsp->ulpStatus != IOSTAT_SUCCESS))) {
3298 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3299 "3126 Failed loopback test issue iocb: "
3300 "iocb_stat:x%x\n", iocb_stat);
3301 rc = -EIO;
3302 goto err_loopback_test_exit;
3305 evt->waiting = 1;
3306 time_left = wait_event_interruptible_timeout(
3307 evt->wq, !list_empty(&evt->events_to_see),
3308 msecs_to_jiffies(1000 *
3309 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3310 evt->waiting = 0;
3311 if (list_empty(&evt->events_to_see)) {
3312 rc = (time_left) ? -EINTR : -ETIMEDOUT;
3313 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3314 "3125 Not receiving unsolicited event, "
3315 "rc:x%x\n", rc);
3316 } else {
3317 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3318 list_move(evt->events_to_see.prev, &evt->events_to_get);
3319 evdat = list_entry(evt->events_to_get.prev,
3320 typeof(*evdat), node);
3321 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3322 rx_databuf = evdat->data;
3323 if (evdat->len != full_size) {
3324 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3325 "1603 Loopback test did not receive expected "
3326 "data length. actual length 0x%x expected "
3327 "length 0x%x\n",
3328 evdat->len, full_size);
3329 rc = -EIO;
3330 } else if (rx_databuf == NULL)
3331 rc = -EIO;
3332 else {
3333 rc = IOCB_SUCCESS;
3334 /* skip over elx loopback header */
3335 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3336 bsg_reply->reply_payload_rcv_len =
3337 sg_copy_from_buffer(job->reply_payload.sg_list,
3338 job->reply_payload.sg_cnt,
3339 rx_databuf, size);
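/* report the full request size to userspace, not the sg copy count */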
3340 bsg_reply->reply_payload_rcv_len = size;
3344 err_loopback_test_exit:
3345 lpfcdiag_loop_self_unreg(phba, rpi);
3347 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3348 lpfc_bsg_event_unref(evt); /* release ref */
3349 lpfc_bsg_event_unref(evt); /* delete */
3350 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3352 if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3353 lpfc_sli_release_iocbq(phba, cmdiocbq);
3355 if (rspiocbq != NULL)
3356 lpfc_sli_release_iocbq(phba, rspiocbq);
3358 if (txbmp != NULL) {
3359 if (txbpl != NULL) {
3360 if (txbuffer != NULL)
3361 diag_cmd_data_free(phba, txbuffer);
3362 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3364 kfree(txbmp);
3367 loopback_test_exit:
3368 kfree(dataout);
3369 /* make error code available to userspace */
3370 bsg_reply->result = rc;
3371 job->dd_data = NULL;
3372 /* complete the job back to userspace if no error */
3373 if (rc == IOCB_SUCCESS)
3374 bsg_job_done(job, bsg_reply->result,
3375 bsg_reply->reply_payload_rcv_len);
3376 return rc;
3380 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3381 * @job: GET_DFC_REV fc_bsg_job
3383 static int
3384 lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3386 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3387 struct fc_bsg_reply *bsg_reply = job->reply;
3388 struct lpfc_hba *phba = vport->phba;
3389 struct get_mgmt_rev_reply *event_reply;
3390 int rc = 0;
3392 if (job->request_len <
3393 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3394 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3395 "2740 Received GET_DFC_REV request below "
3396 "minimum size\n");
3397 rc = -EINVAL;
3398 goto job_error;
3401 event_reply = (struct get_mgmt_rev_reply *)
3402 bsg_reply->reply_data.vendor_reply.vendor_rsp;
3404 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
3405 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3406 "2741 Received GET_DFC_REV reply below "
3407 "minimum size\n");
3408 rc = -EINVAL;
3409 goto job_error;
3412 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3413 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3414 job_error:
3415 bsg_reply->result = rc;
3416 if (rc == 0)
3417 bsg_job_done(job, bsg_reply->result,
3418 bsg_reply->reply_payload_rcv_len);
3419 return rc;
3423 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3424 * @phba: Pointer to HBA context object.
3425 * @pmboxq: Pointer to mailbox command.
3427 * This is the completion handler for mailbox commands issued from the
3428 * lpfc_bsg_issue_mbox function. It is called by the
3429 * mailbox event handler with no lock held, and it
3430 * wakes up the thread waiting on the wait queue pointed to by context1
3431 * of the mailbox.
3433 static void
3434 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3436 struct bsg_job_data *dd_data;
3437 struct fc_bsg_reply *bsg_reply;
3438 struct bsg_job *job;
3439 uint32_t size;
3440 unsigned long flags;
3441 uint8_t *pmb, *pmb_buf;
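/* the bsg tracking structure was stashed in ctx_ndlp when the mailbox was issued */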
3443 dd_data = pmboxq->ctx_ndlp;
3446 * The outgoing buffer is readily referenced from the dma buffer;
3447 * we just need to get the header part from the mailboxq structure.
3449 pmb = (uint8_t *)&pmboxq->u.mb;
3450 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3451 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3453 /* Determine if job has been aborted */
3455 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3456 job = dd_data->set_job;
3457 if (job) {
3458 /* Prevent timeout handling from trying to abort job */
3459 job->dd_data = NULL;
3461 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3463 /* Copy the mailbox data to the job if it is still active */
3465 if (job) {
3466 bsg_reply = job->reply;
3467 size = job->reply_payload.payload_len;
3468 bsg_reply->reply_payload_rcv_len =
3469 sg_copy_from_buffer(job->reply_payload.sg_list,
3470 job->reply_payload.sg_cnt,
3471 pmb_buf, size);
3474 dd_data->set_job = NULL;
3475 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3476 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3477 kfree(dd_data);
3479 /* Complete the job if the job is still active */
3481 if (job) {
3482 bsg_reply->result = 0;
3483 bsg_job_done(job, bsg_reply->result,
3484 bsg_reply->reply_payload_rcv_len);
3486 return;
3490 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3491 * @phba: Pointer to HBA context object.
3492 * @mb: Pointer to a mailbox object.
3493 * @vport: Pointer to a vport object.
3495 * Some commands require the port to be offline, some may not be called from
3496 * the application.
3498 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3499 MAILBOX_t *mb, struct lpfc_vport *vport)
3501 /* return negative error values for bsg job */
3502 switch (mb->mbxCommand) {
3503 /* Offline only */
3504 case MBX_INIT_LINK:
3505 case MBX_DOWN_LINK:
3506 case MBX_CONFIG_LINK:
3507 case MBX_CONFIG_RING:
3508 case MBX_RESET_RING:
3509 case MBX_UNREG_LOGIN:
3510 case MBX_CLEAR_LA:
3511 case MBX_DUMP_CONTEXT:
3512 case MBX_RUN_DIAGS:
3513 case MBX_RESTART:
3514 case MBX_SET_MASK:
3515 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3516 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3517 "2743 Command 0x%x is illegal in on-line "
3518 "state\n",
3519 mb->mbxCommand);
3520 return -EPERM;
3522 break;
3523 case MBX_WRITE_NV:
3524 case MBX_WRITE_VPARMS:
3525 case MBX_LOAD_SM:
3526 case MBX_READ_NV:
3527 case MBX_READ_CONFIG:
3528 case MBX_READ_RCONFIG:
3529 case MBX_READ_STATUS:
3530 case MBX_READ_XRI:
3531 case MBX_READ_REV:
3532 case MBX_READ_LNK_STAT:
3533 case MBX_DUMP_MEMORY:
3534 case MBX_DOWN_LOAD:
3535 case MBX_UPDATE_CFG:
3536 case MBX_KILL_BOARD:
3537 case MBX_READ_TOPOLOGY:
3538 case MBX_LOAD_AREA:
3539 case MBX_LOAD_EXP_ROM:
3540 case MBX_BEACON:
3541 case MBX_DEL_LD_ENTRY:
3542 case MBX_SET_DEBUG:
3543 case MBX_WRITE_WWN:
3544 case MBX_SLI4_CONFIG:
3545 case MBX_READ_EVENT_LOG:
3546 case MBX_READ_EVENT_LOG_STATUS:
3547 case MBX_WRITE_EVENT_LOG:
3548 case MBX_PORT_CAPABILITIES:
3549 case MBX_PORT_IOV_CONTROL:
3550 case MBX_RUN_BIU_DIAG64:
3551 break;
3552 case MBX_SET_VARIABLE:
3553 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3554 "1226 mbox: set_variable 0x%x, 0x%x\n",
3555 mb->un.varWords[0],
3556 mb->un.varWords[1]);
3557 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3558 && (mb->un.varWords[1] == 1)) {
3559 phba->wait_4_mlo_maint_flg = 1;
3560 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
3561 spin_lock_irq(&phba->hbalock);
3562 phba->link_flag &= ~LS_LOOPBACK_MODE;
3563 spin_unlock_irq(&phba->hbalock);
3564 phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3566 break;
3567 case MBX_READ_SPARM64:
3568 case MBX_REG_LOGIN:
3569 case MBX_REG_LOGIN64:
3570 case MBX_CONFIG_PORT:
3571 case MBX_RUN_BIU_DIAG:
3572 default:
3573 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3574 "2742 Unknown Command 0x%x\n",
3575 mb->mbxCommand);
3576 return -EPERM;
3579 return 0; /* ok */
3583 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3584 * @phba: Pointer to HBA context object.
3586 * This routine cleans up and resets BSG handling of a multi-buffer mbox
3587 * command session.
3589 static void
3590 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3592 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3593 return;
3595 /* free all memory, including dma buffers */
3596 lpfc_bsg_dma_page_list_free(phba,
3597 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3598 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3599 /* multi-buffer write mailbox command pass-through complete */
3600 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3601 sizeof(struct lpfc_mbox_ext_buf_ctx));
3602 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3604 return;
3608 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3609 * @phba: Pointer to HBA context object.
3610 * @pmboxq: Pointer to mailbox command.
3612 * This routine handles BSG job completion for mailbox commands with
3613 * multiple external buffers.
3615 static struct bsg_job *
3616 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3618 struct bsg_job_data *dd_data;
3619 struct bsg_job *job;
3620 struct fc_bsg_reply *bsg_reply;
3621 uint8_t *pmb, *pmb_buf;
3622 unsigned long flags;
3623 uint32_t size;
3624 int rc = 0;
3625 struct lpfc_dmabuf *dmabuf;
3626 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3627 uint8_t *pmbx;
3629 dd_data = pmboxq->ctx_buf;
3631 /* Determine if job has been aborted */
3632 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3633 job = dd_data->set_job;
3634 if (job) {
3635 bsg_reply = job->reply;
3636 /* Prevent timeout handling from trying to abort job */
3637 job->dd_data = NULL;
3639 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3642 * The outgoing buffer is readily referenced from the dma buffer;
3643 * we just need to get the header part from the mailboxq structure.
3646 pmb = (uint8_t *)&pmboxq->u.mb;
3647 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3648 /* Copy the byte swapped response mailbox back to the user */
3649 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3650 /* if there is any non-embedded extended data, copy that too */
3651 dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3652 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3653 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3654 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3655 pmbx = (uint8_t *)dmabuf->virt;
3656 /* byte swap the extended data following the mailbox command */
3657 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3658 &pmbx[sizeof(MAILBOX_t)],
3659 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3662 /* Complete the job if the job is still active */
3664 if (job) {
3665 size = job->reply_payload.payload_len;
3666 bsg_reply->reply_payload_rcv_len =
3667 sg_copy_from_buffer(job->reply_payload.sg_list,
3668 job->reply_payload.sg_cnt,
3669 pmb_buf, size);
3671 /* result for successful */
3672 bsg_reply->result = 0;
3674 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3675 "2937 SLI_CONFIG ext-buffer mailbox command "
3676 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3677 phba->mbox_ext_buf_ctx.nembType,
3678 phba->mbox_ext_buf_ctx.mboxType, size);
3679 lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3680 phba->mbox_ext_buf_ctx.nembType,
3681 phba->mbox_ext_buf_ctx.mboxType,
3682 dma_ebuf, sta_pos_addr,
3683 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3684 } else {
3685 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3686 "2938 SLI_CONFIG ext-buffer mailbox "
3687 "command (x%x/x%x) failure, rc:x%x\n",
3688 phba->mbox_ext_buf_ctx.nembType,
3689 phba->mbox_ext_buf_ctx.mboxType, rc);
3693 /* state change */
3694 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3695 kfree(dd_data);
3696 return job;
3700 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3701 * @phba: Pointer to HBA context object.
3702 * @pmboxq: Pointer to mailbox command.
3704 * This is completion handler function for mailbox read commands with multiple
3705 * external buffers.
3707 static void
3708 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3710 struct bsg_job *job;
3711 struct fc_bsg_reply *bsg_reply;
3713 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3715 /* handle the BSG job with mailbox command */
3716 if (!job)
3717 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3719 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3720 "2939 SLI_CONFIG ext-buffer rd mailbox command "
3721 "complete, ctxState:x%x, mbxStatus:x%x\n",
3722 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
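/* a failed command or a single-buffer command ends the read session now */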
3724 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3725 lpfc_bsg_mbox_ext_session_reset(phba);
3727 /* free base driver mailbox structure memory */
3728 mempool_free(pmboxq, phba->mbox_mem_pool);
3730 /* if the job is still active, call job done */
3731 if (job) {
3732 bsg_reply = job->reply;
3733 bsg_job_done(job, bsg_reply->result,
3734 bsg_reply->reply_payload_rcv_len);
3736 return;
3740 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3741 * @phba: Pointer to HBA context object.
3742 * @pmboxq: Pointer to mailbox command.
3744 * This is completion handler function for mailbox write commands with multiple
3745 * external buffers.
3747 static void
3748 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3750 struct bsg_job *job;
3751 struct fc_bsg_reply *bsg_reply;
3753 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3755 /* handle the BSG job with the mailbox command */
3756 if (!job)
3757 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3759 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3760 "2940 SLI_CONFIG ext-buffer wr mailbox command "
3761 "complete, ctxState:x%x, mbxStatus:x%x\n",
3762 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3764 /* free all memory, including dma buffers */
3765 mempool_free(pmboxq, phba->mbox_mem_pool);
3766 lpfc_bsg_mbox_ext_session_reset(phba);
3768 /* if the job is still active, call job done */
3769 if (job) {
3770 bsg_reply = job->reply;
3771 bsg_job_done(job, bsg_reply->result,
3772 bsg_reply->reply_payload_rcv_len);
3775 return;
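* lpfc_bsg_sli_cfg_dma_desc_setup - fill in one SLI_CONFIG dma descriptor
* @phba: Pointer to HBA context object.
* @nemb_tp: Enumerate of non-embedded mailbox command type.
* @index: Index of the dma descriptor to fill in.
* @mbx_dmabuf: Pointer to the mailbox dma buffer.
* @ext_dmabuf: Pointer to an external dma buffer, used when @index > 0.
* Descriptor 0 points at the payload that immediately follows the MAILBOX_t
* header inside @mbx_dmabuf; later descriptors point at separately
* allocated external buffers.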
3778 static void
3779 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3780 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3781 struct lpfc_dmabuf *ext_dmabuf)
3783 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3785 /* pointer to the start of mailbox command */
3786 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3788 if (nemb_tp == nemb_mse) {
3789 if (index == 0) {
3790 sli_cfg_mbx->un.sli_config_emb0_subsys.
3791 mse[index].pa_hi =
3792 putPaddrHigh(mbx_dmabuf->phys +
3793 sizeof(MAILBOX_t));
3794 sli_cfg_mbx->un.sli_config_emb0_subsys.
3795 mse[index].pa_lo =
3796 putPaddrLow(mbx_dmabuf->phys +
3797 sizeof(MAILBOX_t));
3798 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3799 "2943 SLI_CONFIG(mse)[%d], "
3800 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3801 index,
3802 sli_cfg_mbx->un.sli_config_emb0_subsys.
3803 mse[index].buf_len,
3804 sli_cfg_mbx->un.sli_config_emb0_subsys.
3805 mse[index].pa_hi,
3806 sli_cfg_mbx->un.sli_config_emb0_subsys.
3807 mse[index].pa_lo);
3808 } else {
3809 sli_cfg_mbx->un.sli_config_emb0_subsys.
3810 mse[index].pa_hi =
3811 putPaddrHigh(ext_dmabuf->phys);
3812 sli_cfg_mbx->un.sli_config_emb0_subsys.
3813 mse[index].pa_lo =
3814 putPaddrLow(ext_dmabuf->phys);
3815 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3816 "2944 SLI_CONFIG(mse)[%d], "
3817 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3818 index,
3819 sli_cfg_mbx->un.sli_config_emb0_subsys.
3820 mse[index].buf_len,
3821 sli_cfg_mbx->un.sli_config_emb0_subsys.
3822 mse[index].pa_hi,
3823 sli_cfg_mbx->un.sli_config_emb0_subsys.
3824 mse[index].pa_lo);
3826 } else {
3827 if (index == 0) {
3828 sli_cfg_mbx->un.sli_config_emb1_subsys.
3829 hbd[index].pa_hi =
3830 putPaddrHigh(mbx_dmabuf->phys +
3831 sizeof(MAILBOX_t));
3832 sli_cfg_mbx->un.sli_config_emb1_subsys.
3833 hbd[index].pa_lo =
3834 putPaddrLow(mbx_dmabuf->phys +
3835 sizeof(MAILBOX_t));
3836 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3837 "3007 SLI_CONFIG(hbd)[%d], "
3838 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3839 index,
3840 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3841 &sli_cfg_mbx->un.
3842 sli_config_emb1_subsys.hbd[index]),
3843 sli_cfg_mbx->un.sli_config_emb1_subsys.
3844 hbd[index].pa_hi,
3845 sli_cfg_mbx->un.sli_config_emb1_subsys.
3846 hbd[index].pa_lo);
3848 } else {
3849 sli_cfg_mbx->un.sli_config_emb1_subsys.
3850 hbd[index].pa_hi =
3851 putPaddrHigh(ext_dmabuf->phys);
3852 sli_cfg_mbx->un.sli_config_emb1_subsys.
3853 hbd[index].pa_lo =
3854 putPaddrLow(ext_dmabuf->phys);
3855 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3856 "3008 SLI_CONFIG(hbd)[%d], "
3857 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3858 index,
3859 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3860 &sli_cfg_mbx->un.
3861 sli_config_emb1_subsys.hbd[index]),
3862 sli_cfg_mbx->un.sli_config_emb1_subsys.
3863 hbd[index].pa_hi,
3864 sli_cfg_mbx->un.sli_config_emb1_subsys.
3865 hbd[index].pa_lo);
3868 return;
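/*
 * Descriptor layout note (derived from the routine above): for index 0 the
 * external buffer payload lives in the same DMA page as the mailbox itself,
 * immediately after the MAILBOX_t header, so the physical address programmed
 * into the MSE/HBD descriptor is mbx_dmabuf->phys + sizeof(MAILBOX_t):
 *
 *	mbx_dmabuf:  [ MAILBOX_t | first external buffer payload ... ]
 *	ext_dmabuf:  [ payload for each remaining index, one DMA page apiece ]
 *
 * For every index other than 0 the descriptor points at a separately
 * allocated ext_dmabuf page instead.
 */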
3872 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3873 * @phba: Pointer to HBA context object.
3874 * @job: Pointer to the job object.
3875 * @nemb_tp: Enumeration of non-embedded mailbox command type.
3876 * @dmabuf: Pointer to a DMA buffer descriptor.
3878 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3879 * non-embedded external buffers.
3881 static int
3882 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3883 enum nemb_type nemb_tp,
3884 struct lpfc_dmabuf *dmabuf)
3886 struct fc_bsg_request *bsg_request = job->request;
3887 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3888 struct dfc_mbox_req *mbox_req;
3889 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3890 uint32_t ext_buf_cnt, ext_buf_index;
3891 struct lpfc_dmabuf *ext_dmabuf = NULL;
3892 struct bsg_job_data *dd_data = NULL;
3893 LPFC_MBOXQ_t *pmboxq = NULL;
3894 MAILBOX_t *pmb;
3895 uint8_t *pmbx;
3896 int rc, i;
3898 mbox_req =
3899 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3901 /* pointer to the start of mailbox command */
3902 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3904 if (nemb_tp == nemb_mse) {
3905 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3906 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3907 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3908 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3909 "2945 Handled SLI_CONFIG(mse) rd, "
3910 "ext_buf_cnt(%d) out of range(%d)\n",
3911 ext_buf_cnt,
3912 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3913 rc = -ERANGE;
3914 goto job_error;
3916 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3917 "2941 Handled SLI_CONFIG(mse) rd, "
3918 "ext_buf_cnt:%d\n", ext_buf_cnt);
3919 } else {
3920 /* sanity check on interface type for support */
3921 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
3922 LPFC_SLI_INTF_IF_TYPE_2) {
3923 rc = -ENODEV;
3924 goto job_error;
3926 /* nemb_tp == nemb_hbd */
3927 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3928 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3929 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3930 "2946 Handled SLI_CONFIG(hbd) rd, "
3931 "ext_buf_cnt(%d) out of range(%d)\n",
3932 ext_buf_cnt,
3933 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3934 rc = -ERANGE;
3935 goto job_error;
3937 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3938 "2942 Handled SLI_CONFIG(hbd) rd, "
3939 "ext_buf_cnt:%d\n", ext_buf_cnt);
3942 /* before dma descriptor setup */
3943 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3944 sta_pre_addr, dmabuf, ext_buf_cnt);
3946 /* reject a non-embedded mailbox command with no external buffer */
3947 if (ext_buf_cnt == 0) {
3948 rc = -EPERM;
3949 goto job_error;
3950 } else if (ext_buf_cnt > 1) {
3951 /* additional external read buffers */
3952 for (i = 1; i < ext_buf_cnt; i++) {
3953 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3954 if (!ext_dmabuf) {
3955 rc = -ENOMEM;
3956 goto job_error;
3958 list_add_tail(&ext_dmabuf->list,
3959 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3963 /* bsg tracking structure */
3964 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3965 if (!dd_data) {
3966 rc = -ENOMEM;
3967 goto job_error;
3970 /* mailbox command structure for base driver */
3971 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3972 if (!pmboxq) {
3973 rc = -ENOMEM;
3974 goto job_error;
3976 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3978 /* for the first external buffer */
3979 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3981 /* for the rest of external buffer descriptors if any */
3982 if (ext_buf_cnt > 1) {
3983 ext_buf_index = 1;
3984 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3985 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3986 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3987 ext_buf_index, dmabuf,
3988 curr_dmabuf);
3989 ext_buf_index++;
3993 /* after dma descriptor setup */
3994 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3995 sta_pos_addr, dmabuf, ext_buf_cnt);
3997 /* construct base driver mbox command */
3998 pmb = &pmboxq->u.mb;
3999 pmbx = (uint8_t *)dmabuf->virt;
4000 memcpy(pmb, pmbx, sizeof(*pmb));
4001 pmb->mbxOwner = OWN_HOST;
4002 pmboxq->vport = phba->pport;
4004 /* multi-buffer handling context */
4005 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4006 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
4007 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4008 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4009 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4010 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4012 /* callback for multi-buffer read mailbox command */
4013 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
4015 /* context fields to callback function */
4016 pmboxq->ctx_buf = dd_data;
4017 dd_data->type = TYPE_MBOX;
4018 dd_data->set_job = job;
4019 dd_data->context_un.mbox.pmboxq = pmboxq;
4020 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4021 job->dd_data = dd_data;
4023 /* state change */
4024 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4027 * Non-embedded mailbox subcommand data gets byte swapped here because
4028 * the lower-level driver code only byte-swaps the first 64 mailbox words.
4030 if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
4031 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
4032 (nemb_tp == nemb_mse))
4033 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
4034 &pmbx[sizeof(MAILBOX_t)],
4035 sli_cfg_mbx->un.sli_config_emb0_subsys.
4036 mse[0].buf_len);
4038 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4039 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4040 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4041 "2947 Issued SLI_CONFIG ext-buffer "
4042 "mailbox command, rc:x%x\n", rc);
4043 return SLI_CONFIG_HANDLED;
4045 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4046 "2948 Failed to issue SLI_CONFIG ext-buffer "
4047 "mailbox command, rc:x%x\n", rc);
4048 rc = -EPIPE;
4050 job_error:
4051 if (pmboxq)
4052 mempool_free(pmboxq, phba->mbox_mem_pool);
4053 lpfc_bsg_dma_page_list_free(phba,
4054 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4055 kfree(dd_data);
4056 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4057 return rc;
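/*
 * Flow summary for the read path above: the descriptors for all external
 * buffers are set up front, the mailbox is issued with MBX_NOWAIT, and the
 * session state moves to LPFC_BSG_MBOX_PORT. The read data is then handed
 * back to the application one buffer at a time by lpfc_bsg_read_ebuf_get()
 * on subsequent BSG requests carrying the same extMboxTag.
 */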
4061 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
4062 * @phba: Pointer to HBA context object.
4063 * @job: Pointer to the job object.
4064 * @nemb_tp: Enumeration of non-embedded mailbox command type.
4065 * @dmabuf: Pointer to a DMA buffer descriptor.
4067 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
4068 * non-embedded external buffers.
4070 static int
4071 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4072 enum nemb_type nemb_tp,
4073 struct lpfc_dmabuf *dmabuf)
4075 struct fc_bsg_request *bsg_request = job->request;
4076 struct fc_bsg_reply *bsg_reply = job->reply;
4077 struct dfc_mbox_req *mbox_req;
4078 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4079 uint32_t ext_buf_cnt;
4080 struct bsg_job_data *dd_data = NULL;
4081 LPFC_MBOXQ_t *pmboxq = NULL;
4082 MAILBOX_t *pmb;
4083 uint8_t *mbx;
4084 int rc = SLI_CONFIG_NOT_HANDLED, i;
4086 mbox_req =
4087 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4089 /* pointer to the start of mailbox command */
4090 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4092 if (nemb_tp == nemb_mse) {
4093 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
4094 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
4095 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
4096 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4097 "2953 Failed SLI_CONFIG(mse) wr, "
4098 "ext_buf_cnt(%d) out of range(%d)\n",
4099 ext_buf_cnt,
4100 LPFC_MBX_SLI_CONFIG_MAX_MSE);
4101 return -ERANGE;
4103 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4104 "2949 Handled SLI_CONFIG(mse) wr, "
4105 "ext_buf_cnt:%d\n", ext_buf_cnt);
4106 } else {
4107 /* sanity check on interface type for support */
4108 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
4109 LPFC_SLI_INTF_IF_TYPE_2)
4110 return -ENODEV;
4111 /* nemb_tp == nemb_hbd */
4112 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
4113 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
4114 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4115 "2954 Failed SLI_CONFIG(hbd) wr, "
4116 "ext_buf_cnt(%d) out of range(%d)\n",
4117 ext_buf_cnt,
4118 LPFC_MBX_SLI_CONFIG_MAX_HBD);
4119 return -ERANGE;
4121 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4122 "2950 Handled SLI_CONFIG(hbd) wr, "
4123 "ext_buf_cnt:%d\n", ext_buf_cnt);
4126 /* before dma buffer descriptor setup */
4127 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4128 sta_pre_addr, dmabuf, ext_buf_cnt);
4130 if (ext_buf_cnt == 0)
4131 return -EPERM;
4133 /* for the first external buffer */
4134 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4136 /* after dma descriptor setup */
4137 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4138 sta_pos_addr, dmabuf, ext_buf_cnt);
4140 /* log the lengths of the additional external buffers */
4141 for (i = 1; i < ext_buf_cnt; i++) {
4142 if (nemb_tp == nemb_mse)
4143 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4144 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4145 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4146 mse[i].buf_len);
4147 else
4148 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4149 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4150 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4151 &sli_cfg_mbx->un.sli_config_emb1_subsys.
4152 hbd[i]));
4155 /* multi-buffer handling context */
4156 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4157 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4158 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4159 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4160 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4161 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4163 if (ext_buf_cnt == 1) {
4164 /* bsg tracking structure */
4165 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4166 if (!dd_data) {
4167 rc = -ENOMEM;
4168 goto job_error;
4171 /* mailbox command structure for base driver */
4172 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4173 if (!pmboxq) {
4174 rc = -ENOMEM;
4175 goto job_error;
4177 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4178 pmb = &pmboxq->u.mb;
4179 mbx = (uint8_t *)dmabuf->virt;
4180 memcpy(pmb, mbx, sizeof(*pmb));
4181 pmb->mbxOwner = OWN_HOST;
4182 pmboxq->vport = phba->pport;
4184 /* callback for multi-buffer write mailbox command */
4185 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4187 /* context fields to callback function */
4188 pmboxq->ctx_buf = dd_data;
4189 dd_data->type = TYPE_MBOX;
4190 dd_data->set_job = job;
4191 dd_data->context_un.mbox.pmboxq = pmboxq;
4192 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4193 job->dd_data = dd_data;
4195 /* state change */
4197 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4198 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4199 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4200 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4201 "2955 Issued SLI_CONFIG ext-buffer "
4202 "mailbox command, rc:x%x\n", rc);
4203 return SLI_CONFIG_HANDLED;
4205 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4206 "2956 Failed to issue SLI_CONFIG ext-buffer "
4207 "mailbox command, rc:x%x\n", rc);
4208 rc = -EPIPE;
4209 goto job_error;
4212 /* wait for additional external buffers */
4214 bsg_reply->result = 0;
4215 bsg_job_done(job, bsg_reply->result,
4216 bsg_reply->reply_payload_rcv_len);
4217 return SLI_CONFIG_HANDLED;
4219 job_error:
4220 if (pmboxq)
4221 mempool_free(pmboxq, phba->mbox_mem_pool);
4222 kfree(dd_data);
4224 return rc;
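/*
 * Note on the write path above: when ext_buf_cnt == 1 the whole command is
 * already in hand, so the mailbox is issued immediately. When more than one
 * external buffer is expected, the job is completed right away with a zero
 * result and the mailbox is only issued later, from
 * lpfc_bsg_write_ebuf_set(), once the final buffer in the sequence arrives.
 */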
4228 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4229 * @phba: Pointer to HBA context object.
4230 * @job: Pointer to the job object.
4231 * @dmabuf: Pointer to a DMA buffer descriptor.
4233 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4234 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
4235 * with embedded subsystem 0x1 and opcodes with external HBDs.
4237 static int
4238 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4239 struct lpfc_dmabuf *dmabuf)
4241 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4242 uint32_t subsys;
4243 uint32_t opcode;
4244 int rc = SLI_CONFIG_NOT_HANDLED;
4246 /* state change on new multi-buffer pass-through mailbox command */
4247 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4249 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4251 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4252 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4253 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4254 &sli_cfg_mbx->un.sli_config_emb0_subsys);
4255 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4256 &sli_cfg_mbx->un.sli_config_emb0_subsys);
4257 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4258 switch (opcode) {
4259 case FCOE_OPCODE_READ_FCF:
4260 case FCOE_OPCODE_GET_DPORT_RESULTS:
4261 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4262 "2957 Handled SLI_CONFIG "
4263 "subsys_fcoe, opcode:x%x\n",
4264 opcode);
4265 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4266 nemb_mse, dmabuf);
4267 break;
4268 case FCOE_OPCODE_ADD_FCF:
4269 case FCOE_OPCODE_SET_DPORT_MODE:
4270 case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4271 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4272 "2958 Handled SLI_CONFIG "
4273 "subsys_fcoe, opcode:x%x\n",
4274 opcode);
4275 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4276 nemb_mse, dmabuf);
4277 break;
4278 default:
4279 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4280 "2959 Reject SLI_CONFIG "
4281 "subsys_fcoe, opcode:x%x\n",
4282 opcode);
4283 rc = -EPERM;
4284 break;
4286 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4287 switch (opcode) {
4288 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4289 case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4290 case COMN_OPCODE_GET_PROFILE_CONFIG:
4291 case COMN_OPCODE_SET_FEATURES:
4292 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4293 "3106 Handled SLI_CONFIG "
4294 "subsys_comn, opcode:x%x\n",
4295 opcode);
4296 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4297 nemb_mse, dmabuf);
4298 break;
4299 default:
4300 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4301 "3107 Reject SLI_CONFIG "
4302 "subsys_comn, opcode:x%x\n",
4303 opcode);
4304 rc = -EPERM;
4305 break;
4307 } else {
4308 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4309 "2977 Reject SLI_CONFIG "
4310 "subsys:x%d, opcode:x%x\n",
4311 subsys, opcode);
4312 rc = -EPERM;
4314 } else {
4315 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4316 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4317 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4318 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4319 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4320 switch (opcode) {
4321 case COMN_OPCODE_READ_OBJECT:
4322 case COMN_OPCODE_READ_OBJECT_LIST:
4323 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4324 "2960 Handled SLI_CONFIG "
4325 "subsys_comn, opcode:x%x\n",
4326 opcode);
4327 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4328 nemb_hbd, dmabuf);
4329 break;
4330 case COMN_OPCODE_WRITE_OBJECT:
4331 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4332 "2961 Handled SLI_CONFIG "
4333 "subsys_comn, opcode:x%x\n",
4334 opcode);
4335 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4336 nemb_hbd, dmabuf);
4337 break;
4338 default:
4339 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4340 "2962 Not handled SLI_CONFIG "
4341 "subsys_comn, opcode:x%x\n",
4342 opcode);
4343 rc = SLI_CONFIG_NOT_HANDLED;
4344 break;
4346 } else {
4347 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4348 "2978 Not handled SLI_CONFIG "
4349 "subsys:x%d, opcode:x%x\n",
4350 subsys, opcode);
4351 rc = SLI_CONFIG_NOT_HANDLED;
4355 /* state reset on not handled new multi-buffer mailbox command */
4356 if (rc != SLI_CONFIG_HANDLED)
4357 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4359 return rc;
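/*
 * Dispatch summary for the routine above:
 *
 *	emb0/MSE, subsys FCOE: READ_FCF and GET_DPORT_RESULTS go down the
 *		read path; ADD_FCF, SET_DPORT_MODE and LINK_DIAG_STATE go
 *		down the write path.
 *	emb0/MSE, subsys COMN: the GET_CNTL attribute opcodes,
 *		GET_PROFILE_CONFIG and SET_FEATURES go down the read path.
 *	emb1/HBD, subsys COMN: READ_OBJECT and READ_OBJECT_LIST go down
 *		the read path; WRITE_OBJECT goes down the write path.
 *
 * Everything else is either rejected with -EPERM or returned as
 * SLI_CONFIG_NOT_HANDLED so the regular mailbox path can take it.
 */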
4363 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4364 * @phba: Pointer to HBA context object.
4366 * This routine is for requesting to abort a pass-through mailbox command with
4367 * multiple external buffers due to error condition.
4369 static void
4370 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4372 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4373 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4374 else
4375 lpfc_bsg_mbox_ext_session_reset(phba);
4376 return;
4380 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4381 * @phba: Pointer to HBA context object.
4382 * @job: Pointer to the job object.
4384 * This routine copies the next mailbox read external buffer back to
4385 * user space through BSG.
4387 static int
4388 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
4390 struct fc_bsg_reply *bsg_reply = job->reply;
4391 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4392 struct lpfc_dmabuf *dmabuf;
4393 uint8_t *pbuf;
4394 uint32_t size;
4395 uint32_t index;
4397 index = phba->mbox_ext_buf_ctx.seqNum;
4398 phba->mbox_ext_buf_ctx.seqNum++;
4400 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4401 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4403 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4404 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4405 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4406 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4407 "2963 SLI_CONFIG (mse) ext-buffer rd get "
4408 "buffer[%d], size:%d\n", index, size);
4409 } else {
4410 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4411 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4412 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4413 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
4414 "buffer[%d], size:%d\n", index, size);
4416 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4417 return -EPIPE;
4418 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4419 struct lpfc_dmabuf, list);
4420 list_del_init(&dmabuf->list);
4422 /* after dma buffer descriptor setup */
4423 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4424 mbox_rd, dma_ebuf, sta_pos_addr,
4425 dmabuf, index);
4427 pbuf = (uint8_t *)dmabuf->virt;
4428 bsg_reply->reply_payload_rcv_len =
4429 sg_copy_from_buffer(job->reply_payload.sg_list,
4430 job->reply_payload.sg_cnt,
4431 pbuf, size);
4433 lpfc_bsg_dma_page_free(phba, dmabuf);
4435 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4436 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4437 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4438 "command session done\n");
4439 lpfc_bsg_mbox_ext_session_reset(phba);
4442 bsg_reply->result = 0;
4443 bsg_job_done(job, bsg_reply->result,
4444 bsg_reply->reply_payload_rcv_len);
4446 return SLI_CONFIG_HANDLED;
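/*
 * Sequencing note: seqNum counts the buffers already returned, so each BSG
 * request above pops the head of ext_dmabuf_list in the order the
 * descriptors were set up. Once seqNum reaches numBuf the whole session is
 * torn down by lpfc_bsg_mbox_ext_session_reset().
 */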
4450 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4451 * @phba: Pointer to HBA context object.
4452 * @job: Pointer to the job object.
4453 * @dmabuf: Pointer to a DMA buffer descriptor.
4455 * This routine sets up the next mailbox write external buffer obtained
4456 * from user space through BSG.
4458 static int
4459 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4460 struct lpfc_dmabuf *dmabuf)
4462 struct fc_bsg_reply *bsg_reply = job->reply;
4463 struct bsg_job_data *dd_data = NULL;
4464 LPFC_MBOXQ_t *pmboxq = NULL;
4465 MAILBOX_t *pmb;
4466 enum nemb_type nemb_tp;
4467 uint8_t *pbuf;
4468 uint32_t size;
4469 uint32_t index;
4470 int rc;
4472 index = phba->mbox_ext_buf_ctx.seqNum;
4473 phba->mbox_ext_buf_ctx.seqNum++;
4474 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4476 pbuf = (uint8_t *)dmabuf->virt;
4477 size = job->request_payload.payload_len;
4478 sg_copy_to_buffer(job->request_payload.sg_list,
4479 job->request_payload.sg_cnt,
4480 pbuf, size);
4482 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4483 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4484 "2966 SLI_CONFIG (mse) ext-buffer wr set "
4485 "buffer[%d], size:%d\n",
4486 phba->mbox_ext_buf_ctx.seqNum, size);
4488 } else {
4489 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4490 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
4491 "buffer[%d], size:%d\n",
4492 phba->mbox_ext_buf_ctx.seqNum, size);
4496 /* set up external buffer descriptor and add to external buffer list */
4497 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4498 phba->mbox_ext_buf_ctx.mbx_dmabuf,
4499 dmabuf);
4500 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4502 /* after write dma buffer */
4503 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4504 mbox_wr, dma_ebuf, sta_pos_addr,
4505 dmabuf, index);
4507 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4508 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4509 "2968 SLI_CONFIG ext-buffer wr all %d "
4510 "ebuffers received\n",
4511 phba->mbox_ext_buf_ctx.numBuf);
4513 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4514 if (!dd_data) {
4515 rc = -ENOMEM;
4516 goto job_error;
4519 /* mailbox command structure for base driver */
4520 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4521 if (!pmboxq) {
4522 rc = -ENOMEM;
4523 goto job_error;
4525 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4526 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4527 pmb = &pmboxq->u.mb;
4528 memcpy(pmb, pbuf, sizeof(*pmb));
4529 pmb->mbxOwner = OWN_HOST;
4530 pmboxq->vport = phba->pport;
4532 /* callback for multi-buffer write mailbox command */
4533 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4535 /* context fields to callback function */
4536 pmboxq->ctx_buf = dd_data;
4537 dd_data->type = TYPE_MBOX;
4538 dd_data->set_job = job;
4539 dd_data->context_un.mbox.pmboxq = pmboxq;
4540 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4541 job->dd_data = dd_data;
4543 /* state change */
4544 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4546 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4547 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4548 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4549 "2969 Issued SLI_CONFIG ext-buffer "
4550 "mailbox command, rc:x%x\n", rc);
4551 return SLI_CONFIG_HANDLED;
4553 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4554 "2970 Failed to issue SLI_CONFIG ext-buffer "
4555 "mailbox command, rc:x%x\n", rc);
4556 rc = -EPIPE;
4557 goto job_error;
4560 /* wait for additional external buffers */
4561 bsg_reply->result = 0;
4562 bsg_job_done(job, bsg_reply->result,
4563 bsg_reply->reply_payload_rcv_len);
4564 return SLI_CONFIG_HANDLED;
4566 job_error:
4567 if (pmboxq)
4568 mempool_free(pmboxq, phba->mbox_mem_pool);
4569 lpfc_bsg_dma_page_free(phba, dmabuf);
4570 kfree(dd_data);
4572 return rc;
4576 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4577 * @phba: Pointer to HBA context object.
4578 * @job: Pointer to the job object.
4579 * @dmabuf: Pointer to a DMA buffer descriptor.
4581 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4582 * command with multiple non-embedded external buffers.
4584 static int
4585 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4586 struct lpfc_dmabuf *dmabuf)
4588 int rc;
4590 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4591 "2971 SLI_CONFIG buffer (type:x%x)\n",
4592 phba->mbox_ext_buf_ctx.mboxType);
4594 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4595 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4596 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4597 "2972 SLI_CONFIG rd buffer state "
4598 "mismatch:x%x\n",
4599 phba->mbox_ext_buf_ctx.state);
4600 lpfc_bsg_mbox_ext_abort(phba);
4601 return -EPIPE;
4603 rc = lpfc_bsg_read_ebuf_get(phba, job);
4604 if (rc == SLI_CONFIG_HANDLED)
4605 lpfc_bsg_dma_page_free(phba, dmabuf);
4606 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4607 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4608 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4609 "2973 SLI_CONFIG wr buffer state "
4610 "mismatch:x%x\n",
4611 phba->mbox_ext_buf_ctx.state);
4612 lpfc_bsg_mbox_ext_abort(phba);
4613 return -EPIPE;
4615 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4617 return rc;
4621 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4622 * @phba: Pointer to HBA context object.
4623 * @job: Pointer to the job object.
4624 * @dmabuf: Pointer to a DMA buffer descriptor.
4626 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4627 * (0x9B) mailbox commands and external buffers.
4629 static int
4630 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4631 struct lpfc_dmabuf *dmabuf)
4633 struct fc_bsg_request *bsg_request = job->request;
4634 struct dfc_mbox_req *mbox_req;
4635 int rc = SLI_CONFIG_NOT_HANDLED;
4637 mbox_req =
4638 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4640 /* mbox command with/without single external buffer */
4641 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4642 return rc;
4644 /* mbox command and first external buffer */
4645 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4646 if (mbox_req->extSeqNum == 1) {
4647 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4648 "2974 SLI_CONFIG mailbox: tag:%d, "
4649 "seq:%d\n", mbox_req->extMboxTag,
4650 mbox_req->extSeqNum);
4651 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4652 return rc;
4653 } else
4654 goto sli_cfg_ext_error;
4658 * handle additional external buffers
4661 /* check broken pipe conditions */
4662 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4663 goto sli_cfg_ext_error;
4664 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4665 goto sli_cfg_ext_error;
4666 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4667 goto sli_cfg_ext_error;
4669 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4670 "2975 SLI_CONFIG mailbox external buffer: "
4671 "extSta:x%x, tag:%d, seq:%d\n",
4672 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4673 mbox_req->extSeqNum);
4674 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4675 return rc;
4677 sli_cfg_ext_error:
4678 /* all other cases, broken pipe */
4679 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4680 "2976 SLI_CONFIG mailbox broken pipe: "
4681 "ctxSta:x%x, ctxNumBuf:%d "
4682 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4683 phba->mbox_ext_buf_ctx.state,
4684 phba->mbox_ext_buf_ctx.numBuf,
4685 phba->mbox_ext_buf_ctx.mbxTag,
4686 phba->mbox_ext_buf_ctx.seqNum,
4687 mbox_req->extMboxTag, mbox_req->extSeqNum);
4689 lpfc_bsg_mbox_ext_session_reset(phba);
4691 return -EPIPE;
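/*
 * Illustrative only -- a rough sketch of how a userspace tool might drive
 * the multi-buffer protocol implemented above through the BSG interface.
 * None of this code comes from the driver; the helper names and the exact
 * request-issuing mechanics are assumptions about the application side,
 * shown here only to make the tag/sequence rules concrete:
 *
 *	struct dfc_mbox_req *req = vendor_cmd_payload(bsg_request);
 *
 *	req->extMboxTag = tag;		// one tag for the whole session
 *	req->extSeqNum = 1;		// seq 1 carries the mailbox itself
 *	issue_bsg_request(fd, bsg_request, mbox_buf);
 *
 *	for (seq = 2; seq <= num_ext_buffers; seq++) {
 *		req->extSeqNum = seq;	// must be exactly last seq + 1
 *		issue_bsg_request(fd, bsg_request, ext_buf[seq - 2]);
 *	}
 *
 * Any gap in extSeqNum, a tag mismatch, or a sequence number beyond the
 * buffer count declared in the mailbox takes the broken-pipe exit above.
 */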
4695 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4696 * @phba: Pointer to HBA context object.
4697 * @job: Pointer to the job object.
4698 * @vport: Pointer to a vport object.
4700 * Allocate a tracking object, mailbox command memory, get a mailbox
4701 * from the mailbox pool, copy the caller mailbox command.
4703 * If offline and the SLI is active we need to poll for the command (port is
4704 * being reset) and complete the job, otherwise issue the mailbox command and
4705 * let our completion handler finish the command.
4707 static int
4708 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4709 struct lpfc_vport *vport)
4711 struct fc_bsg_request *bsg_request = job->request;
4712 struct fc_bsg_reply *bsg_reply = job->reply;
4713 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4714 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4715 /* a 4k buffer to hold the mb and extended data from/to the bsg */
4716 uint8_t *pmbx = NULL;
4717 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4718 struct lpfc_dmabuf *dmabuf = NULL;
4719 struct dfc_mbox_req *mbox_req;
4720 struct READ_EVENT_LOG_VAR *rdEventLog;
4721 uint32_t transmit_length, receive_length, mode;
4722 struct lpfc_mbx_sli4_config *sli4_config;
4723 struct lpfc_mbx_nembed_cmd *nembed_sge;
4724 struct ulp_bde64 *bde;
4725 uint8_t *ext = NULL;
4726 int rc = 0;
4727 uint8_t *from;
4728 uint32_t size;
4730 /* in case no data is transferred */
4731 bsg_reply->reply_payload_rcv_len = 0;
4733 /* sanity check to protect driver */
4734 if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4735 job->request_payload.payload_len > BSG_MBOX_SIZE) {
4736 rc = -ERANGE;
4737 goto job_done;
4741 * Don't allow mailbox commands to be sent when blocked or when in
4742 * the middle of discovery
4744 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4745 rc = -EAGAIN;
4746 goto job_done;
4749 mbox_req =
4750 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4752 /* check if requested extended data lengths are valid */
4753 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4754 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4755 rc = -ERANGE;
4756 goto job_done;
4759 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4760 if (!dmabuf || !dmabuf->virt) {
4761 rc = -ENOMEM;
4762 goto job_done;
4765 /* Get the mailbox command or external buffer from BSG */
4766 pmbx = (uint8_t *)dmabuf->virt;
4767 size = job->request_payload.payload_len;
4768 sg_copy_to_buffer(job->request_payload.sg_list,
4769 job->request_payload.sg_cnt, pmbx, size);
4771 /* Handle possible SLI_CONFIG with non-embedded payloads */
4772 if (phba->sli_rev == LPFC_SLI_REV4) {
4773 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4774 if (rc == SLI_CONFIG_HANDLED)
4775 goto job_cont;
4776 if (rc)
4777 goto job_done;
4778 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4781 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4782 if (rc != 0)
4783 goto job_done; /* must be negative */
4785 /* allocate our bsg tracking structure */
4786 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4787 if (!dd_data) {
4788 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4789 "2727 Failed allocation of dd_data\n");
4790 rc = -ENOMEM;
4791 goto job_done;
4794 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4795 if (!pmboxq) {
4796 rc = -ENOMEM;
4797 goto job_done;
4799 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4801 pmb = &pmboxq->u.mb;
4802 memcpy(pmb, pmbx, sizeof(*pmb));
4803 pmb->mbxOwner = OWN_HOST;
4804 pmboxq->vport = vport;
4806 /* If HBA encountered an error attention, allow only DUMP
4807 * or RESTART mailbox commands until the HBA is restarted.
4809 if (phba->pport->stopped &&
4810 pmb->mbxCommand != MBX_DUMP_MEMORY &&
4811 pmb->mbxCommand != MBX_RESTART &&
4812 pmb->mbxCommand != MBX_WRITE_VPARMS &&
4813 pmb->mbxCommand != MBX_WRITE_WWN)
4814 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4815 "2797 mbox: Issued mailbox cmd "
4816 "0x%x while in stopped state.\n",
4817 pmb->mbxCommand);
4819 /* extended mailbox commands will need an extended buffer */
4820 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4821 from = pmbx;
4822 ext = from + sizeof(MAILBOX_t);
4823 pmboxq->ctx_buf = ext;
4824 pmboxq->in_ext_byte_len =
4825 mbox_req->inExtWLen * sizeof(uint32_t);
4826 pmboxq->out_ext_byte_len =
4827 mbox_req->outExtWLen * sizeof(uint32_t);
4828 pmboxq->mbox_offset_word = mbox_req->mbOffset;
4831 /* biu diag will need a kernel buffer to transfer the data
4832 * allocate our own buffer and setup the mailbox command to
4833 * use ours
4835 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4836 transmit_length = pmb->un.varWords[1];
4837 receive_length = pmb->un.varWords[4];
4838 /* transmit length cannot be greater than receive length or
4839 * mailbox extension size
4841 if ((transmit_length > receive_length) ||
4842 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4843 rc = -ERANGE;
4844 goto job_done;
4846 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4847 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4848 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4849 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4851 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4852 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4853 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4854 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4855 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4856 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4857 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4858 rdEventLog = &pmb->un.varRdEventLog;
4859 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4860 mode = bf_get(lpfc_event_log, rdEventLog);
4862 /* receive length cannot be greater than mailbox
4863 * extension size
4865 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4866 rc = -ERANGE;
4867 goto job_done;
4870 /* mode zero uses a bde like biu diags command */
4871 if (mode == 0) {
4872 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4873 + sizeof(MAILBOX_t));
4874 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4875 + sizeof(MAILBOX_t));
4877 } else if (phba->sli_rev == LPFC_SLI_REV4) {
4878 /* Let type 4 (well known data) through because the data is
4879 * returned in varwords[4-8]
4880 * otherwise check the receive length and fetch the buffer addr
4882 if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4883 (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4884 /* rebuild the command for sli4 using our own buffers
4885 * like we do for biu diags
4887 receive_length = pmb->un.varWords[2];
4888 /* receive length cannot be greater than mailbox
4889 * extension size
4891 if (receive_length == 0) {
4892 rc = -ERANGE;
4893 goto job_done;
4895 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4896 + sizeof(MAILBOX_t));
4897 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4898 + sizeof(MAILBOX_t));
4899 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4900 pmb->un.varUpdateCfg.co) {
4901 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4903 /* bde size cannot be greater than mailbox ext size */
4904 if (bde->tus.f.bdeSize >
4905 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4906 rc = -ERANGE;
4907 goto job_done;
4909 bde->addrHigh = putPaddrHigh(dmabuf->phys
4910 + sizeof(MAILBOX_t));
4911 bde->addrLow = putPaddrLow(dmabuf->phys
4912 + sizeof(MAILBOX_t));
4913 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4914 /* Handling non-embedded SLI_CONFIG mailbox command */
4915 sli4_config = &pmboxq->u.mqe.un.sli4_config;
4916 if (!bf_get(lpfc_mbox_hdr_emb,
4917 &sli4_config->header.cfg_mhdr)) {
4918 /* rebuild the command for sli4 using our
4919 * own buffers like we do for biu diags
4921 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4922 &pmb->un.varWords[0];
4923 receive_length = nembed_sge->sge[0].length;
4925 /* receive length cannot be greater than
4926 * mailbox extension size
4928 if ((receive_length == 0) ||
4929 (receive_length >
4930 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4931 rc = -ERANGE;
4932 goto job_done;
4935 nembed_sge->sge[0].pa_hi =
4936 putPaddrHigh(dmabuf->phys
4937 + sizeof(MAILBOX_t));
4938 nembed_sge->sge[0].pa_lo =
4939 putPaddrLow(dmabuf->phys
4940 + sizeof(MAILBOX_t));
4945 dd_data->context_un.mbox.dmabuffers = dmabuf;
4947 /* setup wake call as IOCB callback */
4948 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4950 /* setup context field to pass wait_queue pointer to wake function */
4951 pmboxq->ctx_ndlp = dd_data;
4952 dd_data->type = TYPE_MBOX;
4953 dd_data->set_job = job;
4954 dd_data->context_un.mbox.pmboxq = pmboxq;
4955 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4956 dd_data->context_un.mbox.ext = ext;
4957 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4958 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4959 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4960 job->dd_data = dd_data;
4962 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4963 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4964 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4965 if (rc != MBX_SUCCESS) {
4966 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4967 goto job_done;
4970 /* job finished, copy the data */
4971 memcpy(pmbx, pmb, sizeof(*pmb));
4972 bsg_reply->reply_payload_rcv_len =
4973 sg_copy_from_buffer(job->reply_payload.sg_list,
4974 job->reply_payload.sg_cnt,
4975 pmbx, size);
4976 /* not waiting; mbox already done */
4977 rc = 0;
4978 goto job_done;
4981 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4982 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4983 return 1; /* job started */
4985 job_done:
4986 /* common exit for error or job completed inline */
4987 if (pmboxq)
4988 mempool_free(pmboxq, phba->mbox_mem_pool);
4989 lpfc_bsg_dma_page_free(phba, dmabuf);
4990 kfree(dd_data);
4992 job_cont:
4993 return rc;
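/*
 * Return convention for lpfc_bsg_issue_mbox(), relied on by the caller
 * below: 0 means the command completed inline (polled, or SLI_CONFIG
 * already handled the job), 1 means the mailbox was started and the
 * completion handler will finish the job later, and a negative value is
 * an errno-style error.
 */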
4997 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4998 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
5000 static int
5001 lpfc_bsg_mbox_cmd(struct bsg_job *job)
5003 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5004 struct fc_bsg_request *bsg_request = job->request;
5005 struct fc_bsg_reply *bsg_reply = job->reply;
5006 struct lpfc_hba *phba = vport->phba;
5007 struct dfc_mbox_req *mbox_req;
5008 int rc = 0;
5010 /* mix-and-match backward compatibility */
5011 bsg_reply->reply_payload_rcv_len = 0;
5012 if (job->request_len <
5013 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
5014 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
5015 "2737 Mix-and-match backward compatibility "
5016 "between MBOX_REQ old size:%d and "
5017 "new request size:%d\n",
5018 (int)(job->request_len -
5019 sizeof(struct fc_bsg_request)),
5020 (int)sizeof(struct dfc_mbox_req));
5021 mbox_req = (struct dfc_mbox_req *)
5022 bsg_request->rqst_data.h_vendor.vendor_cmd;
5023 mbox_req->extMboxTag = 0;
5024 mbox_req->extSeqNum = 0;
5027 rc = lpfc_bsg_issue_mbox(phba, job, vport);
5029 if (rc == 0) {
5030 /* job done */
5031 bsg_reply->result = 0;
5032 job->dd_data = NULL;
5033 bsg_job_done(job, bsg_reply->result,
5034 bsg_reply->reply_payload_rcv_len);
5035 } else if (rc == 1)
5036 /* job submitted, will complete later*/
5037 rc = 0; /* return zero, no error */
5038 else {
5039 /* some error occurred */
5040 bsg_reply->result = rc;
5041 job->dd_data = NULL;
5044 return rc;
5048 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
5049 * @phba: Pointer to HBA context object.
5050 * @cmdiocbq: Pointer to command iocb.
5051 * @rspiocbq: Pointer to response iocb.
5053 * This function is the completion handler for iocbs issued using
5054 * lpfc_menlo_cmd function. This function is called by the
5055 * ring event handler function without any lock held. This function
5056 * can be called from both worker thread context and interrupt
5057 * context. This function also can be called from another thread which
5058 * cleans up the SLI layer objects.
5059 * This function copies the response payload into the memory provided
5060 * by the BSG job, releases the iocb and DMA buffers, and completes
5061 * the job if it is still active.
5064 static void
5065 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
5066 struct lpfc_iocbq *cmdiocbq,
5067 struct lpfc_iocbq *rspiocbq)
5069 struct bsg_job_data *dd_data;
5070 struct bsg_job *job;
5071 struct fc_bsg_reply *bsg_reply;
5072 IOCB_t *rsp;
5073 struct lpfc_dmabuf *bmp, *cmp, *rmp;
5074 struct lpfc_bsg_menlo *menlo;
5075 unsigned long flags;
5076 struct menlo_response *menlo_resp;
5077 unsigned int rsp_size;
5078 int rc = 0;
5080 dd_data = cmdiocbq->context1;
5081 cmp = cmdiocbq->context2;
5082 bmp = cmdiocbq->context3;
5083 menlo = &dd_data->context_un.menlo;
5084 rmp = menlo->rmp;
5085 rsp = &rspiocbq->iocb;
5087 /* Determine if job has been aborted */
5088 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5089 job = dd_data->set_job;
5090 if (job) {
5091 bsg_reply = job->reply;
5092 /* Prevent timeout handling from trying to abort job */
5093 job->dd_data = NULL;
5095 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5097 /* Copy the job data or set the failing status for the job */
5099 if (job) {
5100 /* always return the xri, this would be used in the case
5101 * of a menlo download to allow the data to be sent as a
5102 * continuation of the exchange.
5105 menlo_resp = (struct menlo_response *)
5106 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5107 menlo_resp->xri = rsp->ulpContext;
5108 if (rsp->ulpStatus) {
5109 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
5110 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
5111 case IOERR_SEQUENCE_TIMEOUT:
5112 rc = -ETIMEDOUT;
5113 break;
5114 case IOERR_INVALID_RPI:
5115 rc = -EFAULT;
5116 break;
5117 default:
5118 rc = -EACCES;
5119 break;
5121 } else {
5122 rc = -EACCES;
5124 } else {
5125 rsp_size = rsp->un.genreq64.bdl.bdeSize;
5126 bsg_reply->reply_payload_rcv_len =
5127 lpfc_bsg_copy_data(rmp, &job->reply_payload,
5128 rsp_size, 0);
5133 lpfc_sli_release_iocbq(phba, cmdiocbq);
5134 lpfc_free_bsg_buffers(phba, cmp);
5135 lpfc_free_bsg_buffers(phba, rmp);
5136 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5137 kfree(bmp);
5138 kfree(dd_data);
5140 /* Complete the job if active */
5142 if (job) {
5143 bsg_reply->result = rc;
5144 bsg_job_done(job, bsg_reply->result,
5145 bsg_reply->reply_payload_rcv_len);
5148 return;
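/*
 * Locking note for the completion above: dd_data->set_job is sampled and
 * job->dd_data cleared under ct_ev_lock, so a concurrent BSG timeout
 * handler can no longer find the iocb to abort once completion has begun.
 */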
5152 * lpfc_menlo_cmd - send an ioctl for menlo hardware
5153 * @job: fc_bsg_job to handle
5155 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
5156 * all the command completions will return the xri for the command.
5157 * For menlo data requests a gen request 64 CX is used to continue the exchange
5158 * supplied in the menlo request header xri field.
5160 static int
5161 lpfc_menlo_cmd(struct bsg_job *job)
5163 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5164 struct fc_bsg_request *bsg_request = job->request;
5165 struct fc_bsg_reply *bsg_reply = job->reply;
5166 struct lpfc_hba *phba = vport->phba;
5167 struct lpfc_iocbq *cmdiocbq;
5168 IOCB_t *cmd;
5169 int rc = 0;
5170 struct menlo_command *menlo_cmd;
5171 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
5172 int request_nseg;
5173 int reply_nseg;
5174 struct bsg_job_data *dd_data;
5175 struct ulp_bde64 *bpl = NULL;
5177 /* in case no data is returned, return just the return code */
5178 bsg_reply->reply_payload_rcv_len = 0;
5180 if (job->request_len <
5181 sizeof(struct fc_bsg_request) +
5182 sizeof(struct menlo_command)) {
5183 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5184 "2784 Received MENLO_CMD request below "
5185 "minimum size\n");
5186 rc = -ERANGE;
5187 goto no_dd_data;
5190 if (job->reply_len < sizeof(*bsg_reply) +
5191 sizeof(struct menlo_response)) {
5192 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5193 "2785 Received MENLO_CMD reply below "
5194 "minimum size\n");
5195 rc = -ERANGE;
5196 goto no_dd_data;
5199 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
5200 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5201 "2786 Adapter does not support menlo "
5202 "commands\n");
5203 rc = -EPERM;
5204 goto no_dd_data;
5207 menlo_cmd = (struct menlo_command *)
5208 bsg_request->rqst_data.h_vendor.vendor_cmd;
5210 /* allocate our bsg tracking structure */
5211 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
5212 if (!dd_data) {
5213 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5214 "2787 Failed allocation of dd_data\n");
5215 rc = -ENOMEM;
5216 goto no_dd_data;
5219 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5220 if (!bmp) {
5221 rc = -ENOMEM;
5222 goto free_dd;
5225 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
5226 if (!bmp->virt) {
5227 rc = -ENOMEM;
5228 goto free_bmp;
5231 INIT_LIST_HEAD(&bmp->list);
5233 bpl = (struct ulp_bde64 *)bmp->virt;
5234 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
5235 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
5236 1, bpl, &request_nseg);
5237 if (!cmp) {
5238 rc = -ENOMEM;
5239 goto free_bmp;
5241 lpfc_bsg_copy_data(cmp, &job->request_payload,
5242 job->request_payload.payload_len, 1);
5244 bpl += request_nseg;
5245 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
5246 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
5247 bpl, &reply_nseg);
5248 if (!rmp) {
5249 rc = -ENOMEM;
5250 goto free_cmp;
5253 cmdiocbq = lpfc_sli_get_iocbq(phba);
5254 if (!cmdiocbq) {
5255 rc = -ENOMEM;
5256 goto free_rmp;
5259 cmd = &cmdiocbq->iocb;
5260 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
5261 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
5262 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
5263 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
5264 cmd->un.genreq64.bdl.bdeSize =
5265 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
5266 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
5267 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
5268 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
5269 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
5270 cmd->ulpBdeCount = 1;
5271 cmd->ulpClass = CLASS3;
5272 cmd->ulpOwner = OWN_CHIP;
5273 cmd->ulpLe = 1; /* Limited Edition */
5274 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
5275 cmdiocbq->vport = phba->pport;
5276 /* We want the firmware to timeout before we do */
5277 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
5278 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
5279 cmdiocbq->context1 = dd_data;
5280 cmdiocbq->context2 = cmp;
5281 cmdiocbq->context3 = bmp;
5282 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5283 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5284 cmd->ulpPU = MENLO_PU; /* 3 */
5285 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
5286 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
5287 } else {
5288 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
5289 cmd->ulpPU = 1;
5290 cmd->un.ulpWord[4] = 0;
5291 cmd->ulpContext = menlo_cmd->xri;
5294 dd_data->type = TYPE_MENLO;
5295 dd_data->set_job = job;
5296 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5297 dd_data->context_un.menlo.rmp = rmp;
5298 job->dd_data = dd_data;
5300 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5301 MENLO_TIMEOUT - 5);
5302 if (rc == IOCB_SUCCESS)
5303 return 0; /* done for now */
5305 lpfc_sli_release_iocbq(phba, cmdiocbq);
5307 free_rmp:
5308 lpfc_free_bsg_buffers(phba, rmp);
5309 free_cmp:
5310 lpfc_free_bsg_buffers(phba, cmp);
5311 free_bmp:
5312 if (bmp->virt)
5313 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5314 kfree(bmp);
5315 free_dd:
5316 kfree(dd_data);
5317 no_dd_data:
5318 /* make error code available to userspace */
5319 bsg_reply->result = rc;
5320 job->dd_data = NULL;
5321 return rc;
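/*
 * Buffer-list layout used above: a single BPL page (bmp) holds the BDEs
 * for the request payload followed immediately by the BDEs for the reply
 * payload, which is why bdl.bdeSize is (request_nseg + reply_nseg) *
 * sizeof(struct ulp_bde64) and bpl is advanced by request_nseg between
 * the two lpfc_alloc_bsg_buffers() calls.
 */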
5324 static int
5325 lpfc_forced_link_speed(struct bsg_job *job)
5327 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5328 struct lpfc_vport *vport = shost_priv(shost);
5329 struct lpfc_hba *phba = vport->phba;
5330 struct fc_bsg_reply *bsg_reply = job->reply;
5331 struct forced_link_speed_support_reply *forced_reply;
5332 int rc = 0;
5334 if (job->request_len <
5335 sizeof(struct fc_bsg_request) +
5336 sizeof(struct get_forced_link_speed_support)) {
5337 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5338 "0048 Received FORCED_LINK_SPEED request "
5339 "below minimum size\n");
5340 rc = -EINVAL;
5341 goto job_error;
5344 forced_reply = (struct forced_link_speed_support_reply *)
5345 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5347 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
5348 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5349 "0049 Received FORCED_LINK_SPEED reply below "
5350 "minimum size\n");
5351 rc = -EINVAL;
5352 goto job_error;
5355 forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5356 ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5357 : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5358 job_error:
5359 bsg_reply->result = rc;
5360 if (rc == 0)
5361 bsg_job_done(job, bsg_reply->result,
5362 bsg_reply->reply_payload_rcv_len);
5363 return rc;
5367 * lpfc_check_fwlog_support: Check FW log support on the adapter
5368 * @phba: Pointer to HBA context object.
5370 * Check if FW logging is supported by the adapter
5372 int
5373 lpfc_check_fwlog_support(struct lpfc_hba *phba)
5375 struct lpfc_ras_fwlog *ras_fwlog = NULL;
5377 ras_fwlog = &phba->ras_fwlog;
5379 if (!ras_fwlog->ras_hwsupport)
5380 return -EACCES;
5381 else if (!ras_fwlog->ras_enabled)
5382 return -EPERM;
5383 else
5384 return 0;
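/*
 * Callers distinguish the two failure modes above: -EACCES means the
 * adapter has no RAS/FW-logging hardware support at all, while -EPERM
 * means the feature exists but logging is currently disabled.
 */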
5388 * lpfc_bsg_get_ras_config: Get RAS configuration settings
5389 * @job: fc_bsg_job to handle
5391 * Get RAS configuration values set.
5393 static int
5394 lpfc_bsg_get_ras_config(struct bsg_job *job)
5396 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5397 struct lpfc_vport *vport = shost_priv(shost);
5398 struct fc_bsg_reply *bsg_reply = job->reply;
5399 struct lpfc_hba *phba = vport->phba;
5400 struct lpfc_bsg_get_ras_config_reply *ras_reply;
5401 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5402 int rc = 0;
5404 if (job->request_len <
5405 sizeof(struct fc_bsg_request) +
5406 sizeof(struct lpfc_bsg_ras_req)) {
5407 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5408 "6192 FW_LOG request received "
5409 "below minimum size\n");
5410 rc = -EINVAL;
5411 goto ras_job_error;
5414 /* Check FW log status */
5415 rc = lpfc_check_fwlog_support(phba);
5416 if (rc)
5417 goto ras_job_error;
5419 ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
5420 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5422 /* Current logging state */
5423 spin_lock_irq(&phba->hbalock);
5424 if (ras_fwlog->state == ACTIVE)
5425 ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
5426 else
5427 ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
5428 spin_unlock_irq(&phba->hbalock);
5430 ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
5431 ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
5433 ras_job_error:
5434 /* make error code available to userspace */
5435 bsg_reply->result = rc;
5437 /* complete the job back to userspace */
5438 if (!rc)
5439 bsg_job_done(job, bsg_reply->result,
5440 bsg_reply->reply_payload_rcv_len);
5441 return rc;
5445 * lpfc_bsg_set_ras_config: Set FW logging parameters
5446 * @job: fc_bsg_job to handle
5448 * Set log-level parameters for FW-logging in host memory
5450 static int
5451 lpfc_bsg_set_ras_config(struct bsg_job *job)
5453 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5454 struct lpfc_vport *vport = shost_priv(shost);
5455 struct lpfc_hba *phba = vport->phba;
5456 struct lpfc_bsg_set_ras_config_req *ras_req;
5457 struct fc_bsg_request *bsg_request = job->request;
5458 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5459 struct fc_bsg_reply *bsg_reply = job->reply;
5460 uint8_t action = 0, log_level = 0;
5461 int rc = 0, action_status = 0;
5463 if (job->request_len <
5464 sizeof(struct fc_bsg_request) +
5465 sizeof(struct lpfc_bsg_set_ras_config_req)) {
5466 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5467 "6182 Received RAS_LOG request "
5468 "below minimum size\n");
5469 rc = -EINVAL;
5470 goto ras_job_error;
5473 /* Check FW log status */
5474 rc = lpfc_check_fwlog_support(phba);
5475 if (rc)
5476 goto ras_job_error;
5478 ras_req = (struct lpfc_bsg_set_ras_config_req *)
5479 bsg_request->rqst_data.h_vendor.vendor_cmd;
5480 action = ras_req->action;
5481 log_level = ras_req->log_level;
5483 if (action == LPFC_RASACTION_STOP_LOGGING) {
5484 /* Check if already disabled */
5485 spin_lock_irq(&phba->hbalock);
5486 if (ras_fwlog->state != ACTIVE) {
5487 spin_unlock_irq(&phba->hbalock);
5488 rc = -ESRCH;
5489 goto ras_job_error;
5491 spin_unlock_irq(&phba->hbalock);
5493 /* Disable logging */
5494 lpfc_ras_stop_fwlog(phba);
5495 } else {
5496 /* action == LPFC_RASACTION_START_LOGGING */
5498 /* Even though FW-logging is active re-initialize
5499 * FW-logging with new log-level. Return status
5500 * "Logging already Running" to caller.
5502 spin_lock_irq(&phba->hbalock);
5503 if (ras_fwlog->state != INACTIVE)
5504 action_status = -EINPROGRESS;
5505 spin_unlock_irq(&phba->hbalock);
5507 /* Enable logging */
5508 rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
5509 LPFC_RAS_ENABLE_LOGGING);
5510 if (rc) {
5511 rc = -EINVAL;
5512 goto ras_job_error;
5515 /* Check if FW-logging is re-initialized */
5516 if (action_status == -EINPROGRESS)
5517 rc = action_status;
5519 ras_job_error:
5520 /* make error code available to userspace */
5521 bsg_reply->result = rc;
5523 /* complete the job back to userspace */
5524 if (!rc)
5525 bsg_job_done(job, bsg_reply->result,
5526 bsg_reply->reply_payload_rcv_len);
5528 return rc;
5532 * lpfc_bsg_get_ras_lwpd: Get log write position data
5533 * @job: fc_bsg_job to handle
5535 * Get Offset/Wrap count of the log message written
5536 * in host memory
5538 static int
5539 lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
5541 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5542 struct lpfc_vport *vport = shost_priv(shost);
5543 struct lpfc_bsg_get_ras_lwpd *ras_reply;
5544 struct lpfc_hba *phba = vport->phba;
5545 struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
5546 struct fc_bsg_reply *bsg_reply = job->reply;
5547 u32 *lwpd_ptr = NULL;
5548 int rc = 0;
5550 rc = lpfc_check_fwlog_support(phba);
5551 if (rc)
5552 goto ras_job_error;
5554 if (job->request_len <
5555 sizeof(struct fc_bsg_request) +
5556 sizeof(struct lpfc_bsg_ras_req)) {
5557 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5558 "6183 Received RAS_LOG request "
5559 "below minimum size\n");
5560 rc = -EINVAL;
5561 goto ras_job_error;
5564 ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
5565 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5567 if (!ras_fwlog->lwpd.virt) {
5568 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5569 "6193 Restart FW Logging\n");
5570 rc = -EINVAL;
5571 goto ras_job_error;
5574 /* Get lwpd offset */
5575 lwpd_ptr = (uint32_t *)(ras_fwlog->lwpd.virt);
5576 ras_reply->offset = be32_to_cpu(*lwpd_ptr & 0xffffffff);
5578 /* Get wrap count */
5579 ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr) & 0xffffffff);
5581 ras_job_error:
5582 /* make error code available to userspace */
5583 bsg_reply->result = rc;
5585 /* complete the job back to userspace */
5586 if (!rc)
5587 bsg_job_done(job, bsg_reply->result,
5588 bsg_reply->reply_payload_rcv_len);
5590 return rc;
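/*
 * LWPD layout assumed by the routine above: the adapter writes two
 * big-endian 32-bit words into lwpd.virt, word 0 holding the current
 * write offset into the log and word 1 the wrap count.
 */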
5594 * lpfc_bsg_get_ras_fwlog: Read FW log
5595 * @job: fc_bsg_job to handle
5597 * Copy the FW log into the passed buffer.
5599 static int
5600 lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
5602 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5603 struct lpfc_vport *vport = shost_priv(shost);
5604 struct lpfc_hba *phba = vport->phba;
5605 struct fc_bsg_request *bsg_request = job->request;
5606 struct fc_bsg_reply *bsg_reply = job->reply;
5607 struct lpfc_bsg_get_fwlog_req *ras_req;
5608 u32 rd_offset, rd_index, offset;
5609 void *src, *fwlog_buff;
5610 struct lpfc_ras_fwlog *ras_fwlog = NULL;
5611 struct lpfc_dmabuf *dmabuf, *next;
5612 int rc = 0;
5614 ras_fwlog = &phba->ras_fwlog;
5616 rc = lpfc_check_fwlog_support(phba);
5617 if (rc)
5618 goto ras_job_error;
5620 /* Logging to be stopped before reading */
5621 spin_lock_irq(&phba->hbalock);
5622 if (ras_fwlog->state == ACTIVE) {
5623 spin_unlock_irq(&phba->hbalock);
5624 rc = -EINPROGRESS;
5625 goto ras_job_error;
5627 spin_unlock_irq(&phba->hbalock);
5629 if (job->request_len <
5630 sizeof(struct fc_bsg_request) +
5631 sizeof(struct lpfc_bsg_get_fwlog_req)) {
5632 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5633 "6184 Received RAS_LOG request "
5634 "below minimum size\n");
5635 rc = -EINVAL;
5636 goto ras_job_error;
5639 ras_req = (struct lpfc_bsg_get_fwlog_req *)
5640 bsg_request->rqst_data.h_vendor.vendor_cmd;
5641 rd_offset = ras_req->read_offset;
5643 /* Allocate memory to read fw log*/
5644 fwlog_buff = vmalloc(ras_req->read_size);
5645 if (!fwlog_buff) {
5646 rc = -ENOMEM;
5647 goto ras_job_error;
5650 rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
5651 offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
5653 list_for_each_entry_safe(dmabuf, next,
5654 &ras_fwlog->fwlog_buff_list, list) {
5656 if (dmabuf->buffer_tag < rd_index)
5657 continue;
5659 src = dmabuf->virt + offset;
5660 memcpy(fwlog_buff, src, ras_req->read_size);
5661 break;
5664 bsg_reply->reply_payload_rcv_len =
5665 sg_copy_from_buffer(job->reply_payload.sg_list,
5666 job->reply_payload.sg_cnt,
5667 fwlog_buff, ras_req->read_size);
5669 vfree(fwlog_buff);
5671 ras_job_error:
5672 bsg_reply->result = rc;
5673 if (!rc)
5674 bsg_job_done(job, bsg_reply->result,
5675 bsg_reply->reply_payload_rcv_len);
5677 return rc;
5680 static int
5681 lpfc_get_trunk_info(struct bsg_job *job)
5683 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5684 struct lpfc_hba *phba = vport->phba;
5685 struct fc_bsg_reply *bsg_reply = job->reply;
5686 struct lpfc_trunk_info *event_reply;
5687 int rc = 0;
5689 if (job->request_len <
5690 sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
5691 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
5692 "2744 Received GET TRUNK _INFO request below "
5693 "minimum size\n");
5694 rc = -EINVAL;
5695 goto job_error;
5698 event_reply = (struct lpfc_trunk_info *)
5699 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5701 if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
5702 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5703 "2728 Received GET TRUNK _INFO reply below "
5704 "minimum size\n");
5705 rc = -EINVAL;
5706 goto job_error;
5708 if (event_reply == NULL) {
5709 rc = -EINVAL;
5710 goto job_error;
5713 bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
5714 (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);
5716 bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
5717 (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);
5719 bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
5720 (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);
5722 bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
5723 (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);
5725 bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
5726 (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);
5728 bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
5729 bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));
5731 bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
5732 bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));
5734 bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
5735 bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));
5737 bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
5738 bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));
5740 event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
5741 event_reply->logical_speed =
5742 phba->sli4_hba.link_state.logical_speed / 1000;
5743 job_error:
5744 bsg_reply->result = rc;
5745 if (!rc)
5746 bsg_job_done(job, bsg_reply->result,
5747 bsg_reply->reply_payload_rcv_len);
5748 return rc;
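/*
 * Usage sketch (illustrative only, not built): decoding the reply above on
 * the application side with the matching bsg_bf_get() accessor, assuming
 * lpfc_bsg.h provides it alongside the bsg_bf_set() used above and that
 * "event_reply" points at the returned struct lpfc_trunk_info.  Speeds are
 * reported in Gb/s after the /1000 scaling above.
 */
#if 0
	if (bsg_bf_get(lpfc_trunk_info_link_status, event_reply))
		printf("trunk up: %u Gb/s logical (port %u Gb/s)\n",
		       event_reply->logical_speed, event_reply->port_speed);
#endif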
/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_loopback_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE_END:
		rc = lpfc_sli4_bsg_diag_mode_end(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
		rc = lpfc_bsg_diag_loopback_run(job);
		break;
	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
		rc = lpfc_sli4_bsg_link_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
		rc = lpfc_forced_link_speed(job);
		break;
	case LPFC_BSG_VENDOR_RAS_GET_LWPD:
		rc = lpfc_bsg_get_ras_lwpd(job);
		break;
	case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
		rc = lpfc_bsg_get_ras_fwlog(job);
		break;
	case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
		rc = lpfc_bsg_get_ras_config(job);
		break;
	case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
		rc = lpfc_bsg_set_ras_config(job);
		break;
	case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
		rc = lpfc_get_trunk_info(job);
		break;
	default:
		rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		bsg_reply->result = rc;
		break;
	}

	return rc;
}
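/*
 * Usage sketch (illustrative only, not built): userspace selects one of the
 * handlers above by placing the vendor opcode in the first word of
 * h_vendor.vendor_cmd; see scsi_bsg_fc.h for the request layout.  The bsg
 * node name and the surrounding buffer setup are assumptions here.
 */
#if 0
	struct fc_bsg_request *req;	/* heads the SG_IO v4 request buffer */

	req->msgcode = FC_BSG_HST_VENDOR;
	req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;
	/* submit through /dev/bsg/fc_host<N> via ioctl(fd, SG_IO, &sgio_v4) */
#endif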
/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: bsg_job to handle
 **/
int
lpfc_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t msgcode;
	int rc;

	msgcode = bsg_request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		bsg_reply->result = rc;
		break;
	}

	return rc;
}
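/*
 * For context (sketch, based on how lpfc_attr.c wires things up): the FC
 * transport reaches lpfc_bsg_request() and lpfc_bsg_timeout() through the
 * driver's fc_function_template; only the two relevant members are shown.
 */
#if 0
	struct fc_function_template lpfc_transport_functions = {
		/* ... */
		.bsg_request = lpfc_bsg_request,
		.bsg_timeout = lpfc_bsg_timeout,
	};
#endif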
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: bsg_job that has timed out
 *
 * This function just aborts the job's IOCB.  The aborted IOCB will return to
 * the waiting function, which will handle passing the error back to userspace.
 **/
int
lpfc_bsg_timeout(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_sli_ring *pring;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	int rc = 0;
	LIST_HEAD(completions);
	struct lpfc_iocbq *check_iocb, *next_iocb;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/* If the job's driver data is NULL, the command completed or is in
	 * the process of completing.  In this case, return status to the
	 * request so the timeout is retried.  This avoids double completion
	 * issues and the request will be pulled off the timer queue when the
	 * command's completion handler executes.  Otherwise, prevent the
	 * command's completion handler from executing the job done callback
	 * and continue processing to abort the outstanding command.
	 */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	if (dd_data) {
		dd_data->set_job = NULL;
		job->dd_data = NULL;
	} else {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return -EAGAIN;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		/* Check to see if the IOCB was issued to the port or not.
		 * If not, remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag.
		 */
		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O abort window is still open */
		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return -EAGAIN;
		}
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list,
					       &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;

	case TYPE_EVT:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;

	case TYPE_MBOX:
		/* Update the ext buf ctx state if needed */
		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MENLO:
		/* Check to see if the IOCB was issued to the port or not.
		 * If not, remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag.
		 */
		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list,
					       &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb, NULL);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* The SCSI transport's fc_bsg_job_timeout() expects a zero return
	 * code; otherwise an error message is printed on the console, so
	 * always return success (zero).
	 */
	return rc;
}