/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include <asm/set_memory.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
    LPFC_UNKNOWN_IOCB,
    LPFC_UNSOL_IOCB,
    LPFC_SOL_IOCB,
    LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
                                  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
                              uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
                                                         struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
                                      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
                                          struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
                                   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
                                     struct lpfc_queue *eq,
                                     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
                                    struct lpfc_queue *cq,
                                    struct lpfc_cqe *cqe);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
    return &iocbq->iocb;
}
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
    uint64_t *src = srcp;
    uint64_t *dest = destp;
    int i;

    for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
        *dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
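/*
 * Editor's note (illustrative, not part of the original driver source): on
 * 64-bit little-endian builds the copy above moves one uint64_t per
 * iteration, so callers such as lpfc_sli4_wq_put() rely on @cnt (the queue
 * entry_size) being a multiple of sizeof(uint64_t). On other configurations
 * the macro falls back to lpfc_sli_pcimem_bcopy(), which performs the copy
 * (and any byte swapping) one 32-bit word at a time.
 */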
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
    union lpfc_wqe *temp_wqe;
    struct lpfc_register doorbell;
    uint32_t host_index;
    uint32_t idx;
    uint32_t i = 0;
    uint8_t *tmp;
    u32 if_type;

    /* sanity check on queue memory */
    if (unlikely(!q))
        return -ENOMEM;
    temp_wqe = lpfc_sli4_qe(q, q->host_index);

    /* If the host has not yet processed the next entry then we are done */
    idx = ((q->host_index + 1) % q->entry_count);
    if (idx == q->hba_index) {
        return -EBUSY;
    }
    /* set consumption flag every once in a while */
    if (!((q->host_index + 1) % q->notify_interval))
        bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
    else
        bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
    if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
        bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
    lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
    if (q->dpp_enable && q->phba->cfg_enable_dpp) {
        /* write to DPP aperture taking advantage of Combined Writes */
        tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
        for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
            __raw_writeq(*((uint64_t *)(tmp + i)),
                         q->dpp_regaddr + i);
#else
        for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
            __raw_writel(*((uint32_t *)(tmp + i)),
                         q->dpp_regaddr + i);
#endif
    }
    /* ensure WQE bcopy and DPP flushed before doorbell write */
    wmb();

    /* Update the host index before invoking device */
    host_index = q->host_index;

    q->host_index = idx;

    /* Ring Doorbell */
    doorbell.word0 = 0;
    if (q->db_format == LPFC_DB_LIST_FORMAT) {
        if (q->dpp_enable && q->phba->cfg_enable_dpp) {
            bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
            bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
            bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
                   q->dpp_id);
            bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
                   q->queue_id);
        } else {
            bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
            bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

            /* Leave bits <23:16> clear for if_type 6 dpp */
            if_type = bf_get(lpfc_sli_intf_if_type,
                             &q->phba->sli4_hba.sli_intf);
            if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
                bf_set(lpfc_wq_db_list_fm_index, &doorbell,
                       host_index);
        }
    } else if (q->db_format == LPFC_DB_RING_FORMAT) {
        bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
        bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
    }
    writel(doorbell.word0, q->db_regaddr);

    return 0;
}
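/*
 * Editor's note (illustrative sketch, not in the original source): a caller
 * posts a WQE roughly as follows, with the appropriate ring lock or hbalock
 * already held, and treats a non-zero return as "entry not posted":
 *
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	if (rc)
 *		return IOCB_ERROR;	// hypothetical error handling
 */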
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
    uint32_t released = 0;

    /* sanity check on queue memory */
    if (unlikely(!q))
        return 0;

    if (q->hba_index == index)
        return 0;
    do {
        q->hba_index = ((q->hba_index + 1) % q->entry_count);
        released++;
    } while (q->hba_index != index);
    return released;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
    struct lpfc_mqe *temp_mqe;
    struct lpfc_register doorbell;

    /* sanity check on queue memory */
    if (unlikely(!q))
        return -ENOMEM;
    temp_mqe = lpfc_sli4_qe(q, q->host_index);

    /* If the host has not yet processed the next entry then we are done */
    if (((q->host_index + 1) % q->entry_count) == q->hba_index)
        return -ENOMEM;
    lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
    /* Save off the mailbox pointer for completion */
    q->phba->mbox = (MAILBOX_t *)temp_mqe;

    /* Update the host index before invoking device */
    q->host_index = ((q->host_index + 1) % q->entry_count);

    /* Ring Doorbell */
    doorbell.word0 = 0;
    bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
    bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
    writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
    return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
    /* sanity check on queue memory */
    if (unlikely(!q))
        return 0;

    /* Clear the mailbox pointer for completion */
    q->phba->mbox = NULL;
    q->hba_index = ((q->hba_index + 1) % q->entry_count);
    return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from a EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
    struct lpfc_eqe *eqe;

    /* sanity check on queue memory */
    if (unlikely(!q))
        return NULL;
    eqe = lpfc_sli4_qe(q, q->host_index);

    /* If the next EQE is not valid then we are done */
    if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
        return NULL;

    /*
     * insert barrier for instruction interlock : data from the hardware
     * must have the valid bit checked before it can be copied and acted
     * upon. Speculative instructions were allowing a bcopy at the start
     * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
     * after our return, to copy data before the valid bit check above
     * was done. As such, some of the copied data was stale. The barrier
     * ensures the check is before any data is copied.
     */
    mb();
    return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
    struct lpfc_register doorbell;

    doorbell.word0 = 0;
    bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
    bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
    bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
           (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
    bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
    writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
    struct lpfc_register doorbell;

    doorbell.word0 = 0;
    bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
    writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
    struct lpfc_register doorbell;

    /* sanity check on queue memory */
    if (unlikely(!q || (count == 0 && !arm)))
        return;

    /* ring doorbell for number popped */
    doorbell.word0 = 0;
    if (arm) {
        bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
        bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
    }
    bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
    bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
    bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
           (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
    bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
    writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
    /* PCI read to flush PCI pipeline on re-arming for INTx mode */
    if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
        readl(q->phba->sli4_hba.EQDBregaddr);
}
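/*
 * Editor's note (illustrative, not in the original source): @count tells the
 * hardware how many EQEs the host consumed, while @arm asks it to re-enable
 * interrupts for the EQ. A typical "done processing" call therefore looks
 * like:
 *
 *	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
 */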
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
    struct lpfc_register doorbell;

    /* sanity check on queue memory */
    if (unlikely(!q || (count == 0 && !arm)))
        return;

    /* ring doorbell for number popped */
    doorbell.word0 = 0;
    if (arm)
        bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
    bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
    bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
    writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
    /* PCI read to flush PCI pipeline on re-arming for INTx mode */
    if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
        readl(q->phba->sli4_hba.EQDBregaddr);
}
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
                        struct lpfc_eqe *eqe)
{
    if (!phba->sli4_hba.pc_sli4_params.eqav)
        bf_set_le32(lpfc_eqe_valid, eqe, 0);

    eq->host_index = ((eq->host_index + 1) % eq->entry_count);

    /* if the index wrapped around, toggle the valid bit */
    if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
        eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
    struct lpfc_eqe *eqe = NULL;
    u32 eq_count = 0, cq_count = 0;
    struct lpfc_cqe *cqe = NULL;
    struct lpfc_queue *cq = NULL, *childq = NULL;
    uint16_t cqid;

    /* walk all the EQ entries and drop on the floor */
    eqe = lpfc_sli4_eq_get(eq);
    while (eqe) {
        /* Get the reference to the corresponding CQ */
        cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
        cq = NULL;

        list_for_each_entry(childq, &eq->child_list, list) {
            if (childq->queue_id == cqid) {
                cq = childq;
                break;
            }
        }
        /* If CQ is valid, iterate through it and drop all the CQEs */
        if (cq) {
            cqe = lpfc_sli4_cq_get(cq);
            while (cqe) {
                __lpfc_sli4_consume_cqe(phba, cq, cqe);
                cq_count++;
                cqe = lpfc_sli4_cq_get(cq);
            }
            /* Clear and re-arm the CQ */
            phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
                                            LPFC_QUEUE_REARM);
            cq_count = 0;
        }
        __lpfc_sli4_consume_eqe(phba, eq, eqe);
        eq_count++;
        eqe = lpfc_sli4_eq_get(eq);
    }

    /* Clear and re-arm the EQ */
    phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
                     uint8_t rearm)
{
    struct lpfc_eqe *eqe;
    int count = 0, consumed = 0;

    if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
        goto rearm_and_exit;

    eqe = lpfc_sli4_eq_get(eq);
    while (eqe) {
        lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
        __lpfc_sli4_consume_eqe(phba, eq, eqe);

        consumed++;
        if (!(++count % eq->max_proc_limit))
            break;

        if (!(count % eq->notify_interval)) {
            phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
                                            LPFC_QUEUE_NOARM);
            consumed = 0;
        }

        eqe = lpfc_sli4_eq_get(eq);
    }
    eq->EQ_processed += count;

    /* Track the max number of EQEs processed in 1 intr */
    if (count > eq->EQ_max_eqe)
        eq->EQ_max_eqe = count;

    eq->queue_claimed = 0;

rearm_and_exit:
    /* Always clear the EQ. */
    phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

    return count;
}
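/*
 * Editor's note (illustrative, not in the original source): the two limits
 * used above serve different purposes. eq->notify_interval controls how
 * often the consumed count is flushed to the doorbell (without re-arming)
 * while the loop keeps running; eq->max_proc_limit bounds how many EQEs one
 * invocation will handle before breaking out, so a busy EQ cannot monopolize
 * the CPU.
 */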
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
    struct lpfc_cqe *cqe;

    /* sanity check on queue memory */
    if (unlikely(!q))
        return NULL;
    cqe = lpfc_sli4_qe(q, q->host_index);

    /* If the next CQE is not valid then we are done */
    if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
        return NULL;

    /*
     * insert barrier for instruction interlock : data from the hardware
     * must have the valid bit checked before it can be copied and acted
     * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
     * instructions allowing action on content before valid bit checked,
     * add barrier here as well. May not be needed as "content" is a
     * single 32-bit entity here (vs multi word structure for cq's).
     */
    mb();
    return cqe;
}
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
                        struct lpfc_cqe *cqe)
{
    if (!phba->sli4_hba.pc_sli4_params.cqav)
        bf_set_le32(lpfc_cqe_valid, cqe, 0);

    cq->host_index = ((cq->host_index + 1) % cq->entry_count);

    /* if the index wrapped around, toggle the valid bit */
    if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
        cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                      uint32_t count, bool arm)
{
    struct lpfc_register doorbell;

    /* sanity check on queue memory */
    if (unlikely(!q || (count == 0 && !arm)))
        return;

    /* ring doorbell for number popped */
    doorbell.word0 = 0;
    if (arm)
        bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
    bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
    bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
    bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
           (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
    bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
    writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
                          uint32_t count, bool arm)
{
    struct lpfc_register doorbell;

    /* sanity check on queue memory */
    if (unlikely(!q || (count == 0 && !arm)))
        return;

    /* ring doorbell for number popped */
    doorbell.word0 = 0;
    if (arm)
        bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
    bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
    bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
    writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @q: The Header Receive Queue to operate on.
 * @wqe: The Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Receive Queue Doorbell to signal
 * the HBA to start processing the Receive Queue Entry. This function returns
 * the index that the rqe was copied to if successful. If no entries are
 * available on @q then this function will return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
                 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
    struct lpfc_rqe *temp_hrqe;
    struct lpfc_rqe *temp_drqe;
    struct lpfc_register doorbell;
    int hq_put_index;
    int dq_put_index;

    /* sanity check on queue memory */
    if (unlikely(!hq) || unlikely(!dq))
        return -ENOMEM;
    hq_put_index = hq->host_index;
    dq_put_index = dq->host_index;
    temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
    temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

    if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
        return -EINVAL;
    if (hq_put_index != dq_put_index)
        return -EINVAL;
    /* If the host has not yet processed the next entry then we are done */
    if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
        return -EBUSY;
    lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
    lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

    /* Update the host index to point to the next slot */
    hq->host_index = ((hq_put_index + 1) % hq->entry_count);
    dq->host_index = ((dq_put_index + 1) % dq->entry_count);

    /* Ring The Header Receive Queue Doorbell */
    if (!(hq->host_index % hq->notify_interval)) {
        doorbell.word0 = 0;
        if (hq->db_format == LPFC_DB_RING_FORMAT) {
            bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
                   hq->notify_interval);
            bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
        } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
            bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
                   hq->notify_interval);
            bf_set(lpfc_rq_db_list_fm_index, &doorbell,
                   hq->host_index);
            bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
        }
        writel(doorbell.word0, hq->db_regaddr);
    }
    return hq_put_index;
}
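/*
 * Editor's note (illustrative, not in the original source): the header (hq)
 * and data (dq) receive queues are posted in lock step, which is why the
 * routine rejects mismatched put indexes; one hrqe/drqe pair describes the
 * header and payload buffers for a single unsolicited receive.
 */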
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @q: The Header Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
    /* sanity check on queue memory */
    if (unlikely(!hq) || unlikely(!dq))
        return 0;

    if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
        return 0;
    hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
    dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
    return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
    return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
                       pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
    return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
                       pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
    struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
    struct lpfc_iocbq *iocbq = NULL;

    lockdep_assert_held(&phba->hbalock);

    list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
    if (iocbq)
        phba->iocb_cnt++;
    if (phba->iocb_cnt > phba->iocb_max)
        phba->iocb_max = phba->iocb_cnt;
    return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
    struct lpfc_sglq *sglq;

    sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
    phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
    return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
    struct lpfc_sglq *sglq;

    sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
    return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
                    uint16_t xritag,
                    struct lpfc_node_rrq *rrq)
{
    struct lpfc_nodelist *ndlp = NULL;

    if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
        ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

    /* The target DID could have been swapped (cable swap)
     * we should use the ndlp from the findnode if it is
     * available.
     */
    if ((!ndlp) && rrq->ndlp)
        ndlp = rrq->ndlp;

    if (!ndlp)
        goto out;

    if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
        rrq->send_rrq = 0;
        rrq->xritag = 0;
        rrq->rrq_stop_time = 0;
    }
out:
    mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if an RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
    struct lpfc_node_rrq *rrq;
    struct lpfc_node_rrq *nextrrq;
    unsigned long next_time;
    unsigned long iflags;
    LIST_HEAD(send_rrq);

    spin_lock_irqsave(&phba->hbalock, iflags);
    phba->hba_flag &= ~HBA_RRQ_ACTIVE;
    next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
    list_for_each_entry_safe(rrq, nextrrq,
                             &phba->active_rrq_list, list) {
        if (time_after(jiffies, rrq->rrq_stop_time))
            list_move(&rrq->list, &send_rrq);
        else if (time_before(rrq->rrq_stop_time, next_time))
            next_time = rrq->rrq_stop_time;
    }
    spin_unlock_irqrestore(&phba->hbalock, iflags);
    if ((!list_empty(&phba->active_rrq_list)) &&
        (!(phba->pport->load_flag & FC_UNLOADING)))
        mod_timer(&phba->rrq_tmr, next_time);
    list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
        list_del(&rrq->list);
        if (!rrq->send_rrq) {
            /* this call will free the rrq */
            lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
        } else if (lpfc_send_rrq(phba, rrq)) {
            /* if we send the rrq then the completion handler
             * will clear the bit in the xribitmap.
             */
            lpfc_clr_rrq_active(phba, rrq->xritag,
                                rrq);
        }
    }
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_node_rrq *rrq;
    struct lpfc_node_rrq *nextrrq;
    unsigned long iflags;

    if (phba->sli_rev != LPFC_SLI_REV4)
        return NULL;
    spin_lock_irqsave(&phba->hbalock, iflags);
    list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
        if (rrq->vport == vport && rrq->xritag == xri &&
            rrq->nlp_DID == did) {
            list_del(&rrq->list);
            spin_unlock_irqrestore(&phba->hbalock, iflags);
            return rrq;
        }
    }
    spin_unlock_irqrestore(&phba->hbalock, iflags);
    return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
    struct lpfc_hba *phba = vport->phba;
    struct lpfc_node_rrq *rrq;
    struct lpfc_node_rrq *nextrrq;
    unsigned long iflags;
    LIST_HEAD(rrq_list);

    if (phba->sli_rev != LPFC_SLI_REV4)
        return;
    if (!ndlp) {
        lpfc_sli4_vport_delete_els_xri_aborted(vport);
        lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
    }
    spin_lock_irqsave(&phba->hbalock, iflags);
    list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
        if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
            list_move(&rrq->list, &rrq_list);
    spin_unlock_irqrestore(&phba->hbalock, iflags);

    list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
        list_del(&rrq->list);
        lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
    }
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                     uint16_t xritag)
{
    if (!ndlp)
        return 0;
    if (!ndlp->active_rrqs_xri_bitmap)
        return 0;
    if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
        return 1;
    else
        return 0;
}
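/*
 * Editor's note (illustrative, not in the original source): the RRQ helpers
 * work together: lpfc_set_rrq_active() marks an XRI busy after an abort,
 * lpfc_test_rrq_active() lets the SGL allocator skip that XRI, and
 * lpfc_clr_rrq_active() clears the bit once RATOV expires or the RRQ ELS
 * completes.
 */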
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq is activated for this xri,
 *         < 0 if there is no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
                    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
    unsigned long iflags;
    struct lpfc_node_rrq *rrq;
    int empty;

    if (!phba->cfg_enable_rrq)
        return -EINVAL;

    spin_lock_irqsave(&phba->hbalock, iflags);
    if (phba->pport->load_flag & FC_UNLOADING) {
        phba->hba_flag &= ~HBA_RRQ_ACTIVE;
        goto out;
    }

    /*
     * set the active bit even if there is no mem available.
     */
    if (NLP_CHK_FREE_REQ(ndlp))
        goto out;

    if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
        goto out;

    if (!ndlp->active_rrqs_xri_bitmap)
        goto out;

    if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
        goto out;

    spin_unlock_irqrestore(&phba->hbalock, iflags);
    rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
    if (!rrq) {
        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
                        " DID:0x%x Send:%d\n",
                        xritag, rxid, ndlp->nlp_DID, send_rrq);
        return -EINVAL;
    }
    if (phba->cfg_enable_rrq == 1)
        rrq->send_rrq = send_rrq;
    else
        rrq->send_rrq = 0;
    rrq->xritag = xritag;
    rrq->rrq_stop_time = jiffies +
                         msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
    rrq->ndlp = ndlp;
    rrq->nlp_DID = ndlp->nlp_DID;
    rrq->vport = ndlp->vport;
    rrq->rxid = rxid;
    spin_lock_irqsave(&phba->hbalock, iflags);
    empty = list_empty(&phba->active_rrq_list);
    list_add_tail(&rrq->list, &phba->active_rrq_list);
    phba->hba_flag |= HBA_RRQ_ACTIVE;
    if (empty)
        lpfc_worker_wake_up(phba);
    spin_unlock_irqrestore(&phba->hbalock, iflags);
    return 0;
out:
    spin_unlock_irqrestore(&phba->hbalock, iflags);
    lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                    "2921 Can't set rrq active xri:0x%x rxid:0x%x"
                    " DID:0x%x Send:%d\n",
                    xritag, rxid, ndlp->nlp_DID, send_rrq);
    return -EINVAL;
}
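/*
 * Editor's note (illustrative sketch, not in the original source): a caller
 * that has just aborted an exchange might record the RRQ along these lines
 * (the xritag, rxid and ndlp names come from the aborted I/O and are
 * assumptions; the last argument requests that an RRQ ELS actually be sent):
 *
 *	lpfc_set_rrq_active(phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag,
 *			    rxid, 1);
 */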
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
    struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
    struct lpfc_sglq *sglq = NULL;
    struct lpfc_sglq *start_sglq = NULL;
    struct lpfc_io_buf *lpfc_cmd;
    struct lpfc_nodelist *ndlp;
    struct lpfc_sli_ring *pring = NULL;
    int found = 0;

    if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
        pring = phba->sli4_hba.nvmels_wq->pring;
    else
        pring = lpfc_phba_elsring(phba);

    lockdep_assert_held(&pring->ring_lock);

    if (piocbq->iocb_flag & LPFC_IO_FCP) {
        lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
        ndlp = lpfc_cmd->rdata->pnode;
    } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
               !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
        ndlp = piocbq->context_un.ndlp;
    } else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
        if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
            ndlp = NULL;
        else
            ndlp = piocbq->context_un.ndlp;
    } else {
        ndlp = piocbq->context1;
    }

    spin_lock(&phba->sli4_hba.sgl_list_lock);
    list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
    start_sglq = sglq;
    while (!found) {
        if (!sglq)
            break;
        if (ndlp && ndlp->active_rrqs_xri_bitmap &&
            test_bit(sglq->sli4_lxritag,
                     ndlp->active_rrqs_xri_bitmap)) {
            /* This xri has an rrq outstanding for this DID.
             * put it back in the list and get another xri.
             */
            list_add_tail(&sglq->list, lpfc_els_sgl_list);
            sglq = NULL;
            list_remove_head(lpfc_els_sgl_list, sglq,
                             struct lpfc_sglq, list);
            if (sglq == start_sglq) {
                list_add_tail(&sglq->list, lpfc_els_sgl_list);
                sglq = NULL;
                break;
            } else
                continue;
        }
        sglq->ndlp = ndlp;
        found = 1;
        phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
        sglq->state = SGL_ALLOCATED;
    }
    spin_unlock(&phba->sli4_hba.sgl_list_lock);
    return sglq;
}
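/*
 * Editor's note (illustrative, not in the original source): skipping XRIs
 * whose RRQ bit is still set prevents the driver from reusing an exchange ID
 * for a remote port that still has a Reinstate Recovery Qualifier
 * outstanding; the loop simply rotates such sglqs to the tail of the list
 * and tries the next one.
 */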
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
    struct list_head *lpfc_nvmet_sgl_list;
    struct lpfc_sglq *sglq = NULL;

    lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

    lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

    list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
    if (!sglq)
        return NULL;
    phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
    sglq->state = SGL_ALLOCATED;
    return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
    struct lpfc_iocbq *iocbq = NULL;
    unsigned long iflags;

    spin_lock_irqsave(&phba->hbalock, iflags);
    iocbq = __lpfc_sli_get_iocbq(phba);
    spin_unlock_irqrestore(&phba->hbalock, iflags);
    return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sqlq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
    struct lpfc_sglq *sglq;
    size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
    unsigned long iflag = 0;
    struct lpfc_sli_ring *pring;

    lockdep_assert_held(&phba->hbalock);

    if (iocbq->sli4_xritag == NO_XRI)
        sglq = NULL;
    else
        sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

    if (sglq) {
        if (iocbq->iocb_flag & LPFC_IO_NVMET) {
            spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                              iflag);
            sglq->state = SGL_FREED;
            list_add_tail(&sglq->list,
                          &phba->sli4_hba.lpfc_nvmet_sgl_list);
            spin_unlock_irqrestore(
                &phba->sli4_hba.sgl_list_lock, iflag);
            goto out;
        }

        pring = phba->sli4_hba.els_wq->pring;
        if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
            (sglq->state != SGL_XRI_ABORTED)) {
            spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                              iflag);
            list_add(&sglq->list,
                     &phba->sli4_hba.lpfc_abts_els_sgl_list);
            spin_unlock_irqrestore(
                &phba->sli4_hba.sgl_list_lock, iflag);
        } else {
            spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
                              iflag);
            sglq->state = SGL_FREED;
            list_add_tail(&sglq->list,
                          &phba->sli4_hba.lpfc_els_sgl_list);
            spin_unlock_irqrestore(
                &phba->sli4_hba.sgl_list_lock, iflag);

            /* Check if TXQ queue needs to be serviced */
            if (!list_empty(&pring->txq))
                lpfc_worker_wake_up(phba);
        }
    }

out:
    /*
     * Clean all volatile data fields, preserve iotag and node struct.
     */
    memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
    iocbq->sli4_lxritag = NO_XRI;
    iocbq->sli4_xritag = NO_XRI;
    iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
                          LPFC_IO_NVME_LS);
    list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
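/*
 * Editor's note (illustrative, not in the original source): the sglq attached
 * to a completed SLI-4 iocb can take one of three paths above: back to the
 * NVMET pool, parked on lpfc_abts_els_sgl_list while an aborted XRI is still
 * owned by the hardware, or returned directly to lpfc_els_sgl_list for reuse.
 */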
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
    size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

    lockdep_assert_held(&phba->hbalock);

    /*
     * Clean all volatile data fields, preserve iotag and node struct.
     */
    memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
    iocbq->sli4_xritag = NO_XRI;
    list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
    lockdep_assert_held(&phba->hbalock);

    phba->__lpfc_sli_release_iocbq(phba, iocbq);
    phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
    unsigned long iflags;

    /*
     * Clean all volatile data fields, preserve iotag and node struct.
     */
    spin_lock_irqsave(&phba->hbalock, iflags);
    __lpfc_sli_release_iocbq(phba, iocbq);
    spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
                      uint32_t ulpstatus, uint32_t ulpWord4)
{
    struct lpfc_iocbq *piocb;

    while (!list_empty(iocblist)) {
        list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
        if (!piocb->iocb_cmpl) {
            if (piocb->iocb_flag & LPFC_IO_NVME)
                lpfc_nvme_cancel_iocb(phba, piocb);
            else
                lpfc_sli_release_iocbq(phba, piocb);
        } else {
            piocb->iocb.ulpStatus = ulpstatus;
            piocb->iocb.un.ulpWord[4] = ulpWord4;
            (piocb->iocb_cmpl) (phba, piocb, piocb);
        }
    }
    return;
}
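/*
 * Editor's note (illustrative sketch, not in the original source): callers
 * typically gather the iocbs to be failed on a local list and then cancel
 * them in one pass, e.g.:
 *
 *	LIST_HEAD(completions);
 *	// ... splice the aborted iocbs onto &completions under the lock ...
 *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_ABORTED);
 */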
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
    lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

    if (iocb_cmnd > CMD_MAX_IOCB_CMD)
        return 0;

    switch (iocb_cmnd) {
    case CMD_XMIT_SEQUENCE_CR:
    case CMD_XMIT_SEQUENCE_CX:
    case CMD_XMIT_BCAST_CN:
    case CMD_XMIT_BCAST_CX:
    case CMD_ELS_REQUEST_CR:
    case CMD_ELS_REQUEST_CX:
    case CMD_CREATE_XRI_CR:
    case CMD_CREATE_XRI_CX:
    case CMD_GET_RPI_CN:
    case CMD_XMIT_ELS_RSP_CX:
    case CMD_GET_RPI_CR:
    case CMD_FCP_IWRITE_CR:
    case CMD_FCP_IWRITE_CX:
    case CMD_FCP_IREAD_CR:
    case CMD_FCP_IREAD_CX:
    case CMD_FCP_ICMND_CR:
    case CMD_FCP_ICMND_CX:
    case CMD_FCP_TSEND_CX:
    case CMD_FCP_TRSP_CX:
    case CMD_FCP_TRECEIVE_CX:
    case CMD_FCP_AUTO_TRSP_CX:
    case CMD_ADAPTER_MSG:
    case CMD_ADAPTER_DUMP:
    case CMD_XMIT_SEQUENCE64_CR:
    case CMD_XMIT_SEQUENCE64_CX:
    case CMD_XMIT_BCAST64_CN:
    case CMD_XMIT_BCAST64_CX:
    case CMD_ELS_REQUEST64_CR:
    case CMD_ELS_REQUEST64_CX:
    case CMD_FCP_IWRITE64_CR:
    case CMD_FCP_IWRITE64_CX:
    case CMD_FCP_IREAD64_CR:
    case CMD_FCP_IREAD64_CX:
    case CMD_FCP_ICMND64_CR:
    case CMD_FCP_ICMND64_CX:
    case CMD_FCP_TSEND64_CX:
    case CMD_FCP_TRSP64_CX:
    case CMD_FCP_TRECEIVE64_CX:
    case CMD_GEN_REQUEST64_CR:
    case CMD_GEN_REQUEST64_CX:
    case CMD_XMIT_ELS_RSP64_CX:
    case DSSCMD_IWRITE64_CR:
    case DSSCMD_IWRITE64_CX:
    case DSSCMD_IREAD64_CR:
    case DSSCMD_IREAD64_CX:
        type = LPFC_SOL_IOCB;
        break;
    case CMD_ABORT_XRI_CN:
    case CMD_ABORT_XRI_CX:
    case CMD_CLOSE_XRI_CN:
    case CMD_CLOSE_XRI_CX:
    case CMD_XRI_ABORTED_CX:
    case CMD_ABORT_MXRI64_CN:
    case CMD_XMIT_BLS_RSP64_CX:
        type = LPFC_ABORT_IOCB;
        break;
    case CMD_RCV_SEQUENCE_CX:
    case CMD_RCV_ELS_REQ_CX:
    case CMD_RCV_SEQUENCE64_CX:
    case CMD_RCV_ELS_REQ64_CX:
    case CMD_ASYNC_STATUS:
    case CMD_IOCB_RCV_SEQ64_CX:
    case CMD_IOCB_RCV_ELS64_CX:
    case CMD_IOCB_RCV_CONT64_CX:
    case CMD_IOCB_RET_XRI64_CX:
        type = LPFC_UNSOL_IOCB;
        break;
    case CMD_IOCB_XMIT_MSEQ64_CR:
    case CMD_IOCB_XMIT_MSEQ64_CX:
    case CMD_IOCB_RCV_SEQ_LIST64_CX:
    case CMD_IOCB_RCV_ELS_LIST64_CX:
    case CMD_IOCB_CLOSE_EXTENDED_CN:
    case CMD_IOCB_ABORT_EXTENDED_CN:
    case CMD_IOCB_RET_HBQE64_CN:
    case CMD_IOCB_FCP_IBIDIR64_CR:
    case CMD_IOCB_FCP_IBIDIR64_CX:
    case CMD_IOCB_FCP_ITASKMGT64_CX:
    case CMD_IOCB_LOGENTRY_CN:
    case CMD_IOCB_LOGENTRY_ASYNC_CN:
        printk("%s - Unhandled SLI-3 Command x%x\n",
               __func__, iocb_cmnd);
        type = LPFC_UNKNOWN_IOCB;
        break;
    default:
        type = LPFC_UNKNOWN_IOCB;
        break;
    }

    return type;
}
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
    struct lpfc_sli *psli = &phba->sli;
    LPFC_MBOXQ_t *pmb;
    MAILBOX_t *pmbox;
    int i, rc, ret = 0;

    pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb)
        return -ENOMEM;
    pmbox = &pmb->u.mb;
    phba->link_state = LPFC_INIT_MBX_CMDS;
    for (i = 0; i < psli->num_rings; i++) {
        lpfc_config_ring(phba, i, pmb);
        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
        if (rc != MBX_SUCCESS) {
            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                            "0446 Adapter failed to init (%d), "
                            "mbxCmd x%x CFG_RING, mbxStatus x%x, "
                            "ring %d\n",
                            rc, pmbox->mbxCommand,
                            pmbox->mbxStatus, i);
            phba->link_state = LPFC_HBA_ERROR;
            ret = -ENXIO;
            break;
        }
    }
    mempool_free(pmb, phba->mbox_mem_pool);
    return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                        struct lpfc_iocbq *piocb)
{
    if (phba->sli_rev == LPFC_SLI_REV4)
        lockdep_assert_held(&pring->ring_lock);
    else
        lockdep_assert_held(&phba->hbalock);

    BUG_ON(!piocb);

    list_add_tail(&piocb->list, &pring->txcmplq);
    piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
    pring->txcmplq_cnt++;

    if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
        (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
        (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
        BUG_ON(!piocb->vport);
        if (!(piocb->vport->load_flag & FC_UNLOADING))
            mod_timer(&piocb->vport->els_tmofunc,
                      jiffies +
                      msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
    }

    return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
    struct lpfc_iocbq *cmd_iocb;

    lockdep_assert_held(&phba->hbalock);

    list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
    return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * the iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
    struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
    uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

    lockdep_assert_held(&phba->hbalock);

    if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
        (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
        pring->sli.sli3.next_cmdidx = 0;

    if (unlikely(pring->sli.sli3.local_getidx ==
                 pring->sli.sli3.next_cmdidx)) {

        pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

        if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
            lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                            "0315 Ring %d issue: portCmdGet %d "
                            "is bigger than cmd ring %d\n",
                            pring->ringno,
                            pring->sli.sli3.local_getidx,
                            max_cmd_idx);

            phba->link_state = LPFC_HBA_ERROR;
            /*
             * All error attention handlers are posted to
             * worker thread
             */
            phba->work_ha |= HA_ERATT;
            phba->work_hs = HS_FFER3;

            lpfc_worker_wake_up(phba);

            return NULL;
        }

        if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
            return NULL;
    }

    return lpfc_cmd_iocb(phba, pring);
}
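/*
 * Editor's note (illustrative, not in the original source): next_cmdidx wraps
 * modulo the ring size, so with numCiocb == 256 an index that increments past
 * 255 wraps back to 0; a slot is only handed out when it would not collide
 * with the adapter's last reported get index (pgp->cmdGetInx).
 */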
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
    struct lpfc_iocbq **new_arr;
    struct lpfc_iocbq **old_arr;
    size_t new_len;
    struct lpfc_sli *psli = &phba->sli;
    uint16_t iotag;

    spin_lock_irq(&phba->hbalock);
    iotag = psli->last_iotag;
    if (++iotag < psli->iocbq_lookup_len) {
        psli->last_iotag = iotag;
        psli->iocbq_lookup[iotag] = iocbq;
        spin_unlock_irq(&phba->hbalock);
        iocbq->iotag = iotag;
        return iotag;
    } else if (psli->iocbq_lookup_len < (0xffff
                                         - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
        new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
        spin_unlock_irq(&phba->hbalock);
        new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
                          GFP_KERNEL);
        if (new_arr) {
            spin_lock_irq(&phba->hbalock);
            old_arr = psli->iocbq_lookup;
            if (new_len <= psli->iocbq_lookup_len) {
                /* highly unprobable case */
                kfree(new_arr);
                iotag = psli->last_iotag;
                if (++iotag < psli->iocbq_lookup_len) {
                    psli->last_iotag = iotag;
                    psli->iocbq_lookup[iotag] = iocbq;
                    spin_unlock_irq(&phba->hbalock);
                    iocbq->iotag = iotag;
                    return iotag;
                }
                spin_unlock_irq(&phba->hbalock);
                return 0;
            }
            if (psli->iocbq_lookup)
                memcpy(new_arr, old_arr,
                       ((psli->last_iotag + 1) *
                        sizeof(struct lpfc_iocbq *)));
            psli->iocbq_lookup = new_arr;
            psli->iocbq_lookup_len = new_len;
            psli->last_iotag = iotag;
            psli->iocbq_lookup[iotag] = iocbq;
            spin_unlock_irq(&phba->hbalock);
            iocbq->iotag = iotag;
            kfree(old_arr);
            return iotag;
        }
    } else
        spin_unlock_irq(&phba->hbalock);

    lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                    "0318 Failed to allocate IOTAG.last IOTAG is %d\n",
                    psli->last_iotag);

    return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
    lockdep_assert_held(&phba->hbalock);
    /*
     * Set up an iotag
     */
    nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


    if (pring->ringno == LPFC_ELS_RING) {
        lpfc_debugfs_slow_ring_trc(phba,
            "IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
            *(((uint32_t *) &nextiocb->iocb) + 4),
            *(((uint32_t *) &nextiocb->iocb) + 6),
            *(((uint32_t *) &nextiocb->iocb) + 7));
    }

    /*
     * Issue iocb command to adapter
     */
    lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
    wmb();
    pring->stats.iocb_cmd++;

    /*
     * If there is no completion routine to call, we can release the
     * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
     * that have no rsp ring completion, iocb_cmpl MUST be NULL.
     */
    if (nextiocb->iocb_cmpl)
        lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
    else
        __lpfc_sli_release_iocbq(phba, nextiocb);

    /*
     * Let the HBA know what IOCB slot will be the next one the
     * driver will put a command into.
     */
    pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
    writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
    int ringno = pring->ringno;

    pring->flag |= LPFC_CALL_RING_AVAILABLE;

    wmb();

    /*
     * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
     * The HBA will tell us when an IOCB entry is available.
     */
    writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
    readl(phba->CAregaddr); /* flush */

    pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is a free slot
 * available for the HBQ it will return a pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);
	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
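/*
 * Illustrative sketch (not driver code): the put/get indexes above form a
 * simple circular buffer.  The generic wraparound used by
 * lpfc_sli_next_hbq_slot() reduces to the hypothetical helper below; a slot
 * is only free while the advanced put index has not caught up with the get
 * index read back from SLIM.
 *
 *	static uint32_t example_next_idx(uint32_t idx, uint32_t entry_count)
 *	{
 *		return (idx + 1 >= entry_count) ? 0 : idx + 1;
 *	}
 */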
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer, otherwise it will
 * return an error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
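/*
 * Illustrative note (hedged): lpfc_sli_hbq_to_firmware() is a thin wrapper
 * around a per-SLI-revision function pointer.  During initialization the
 * driver points phba->lpfc_sli_hbq_to_firmware at either the _s3 or the _s4
 * variant, roughly as below; the exact assignment lives in the API jump
 * table setup code and is shown here only as a sketch.
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
 *	else
 *		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
 */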
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully posts the buffer, else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
				/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
				/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
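/*
 * Illustrative sketch (not driver code): the buffer tag built above packs
 * the HBQ number into the upper 16 bits and the buffer/RQE index into the
 * lower 16 bits, so the owning queue can be recovered from the tag alone.
 * The macro names are hypothetical:
 *
 *	#define EXAMPLE_TAG(hbqno, idx)		(((hbqno) << 16) | (idx))
 *	#define EXAMPLE_TAG_TO_HBQ(tag)		((tag) >> 16)
 *	#define EXAMPLE_TAG_TO_IDX(tag)		((tag) & 0xffff)
 *
 * lpfc_sli_hbqbuf_find() and lpfc_sli_free_hbq() below rely on exactly this
 * "tag >> 16" decode.
 */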
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.ring_mask = (1 << LPFC_ELS_RING),
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
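/*
 * Illustrative usage (hedged): callers size the post with either the
 * init_count or the add_count from lpfc_hbq_defs[], for example
 *
 *	posted = lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ,
 *				lpfc_hbq_defs[LPFC_ELS_HBQ]->add_count);
 *
 * which is what lpfc_sli_hbqbuf_add_hbqs() and lpfc_sli_hbqbuf_init_hbqs()
 * below do; the return value is the number of buffers actually accepted by
 * the firmware.
 */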
/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count);
}
/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count);
}
/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list to take a buffer from.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: Pointer to the receive queue whose buffer list is used.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. This function gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}
/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_LNK_STAT:
	case MBX_UNREG_LOGIN:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_UPDATE_CFG:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_UNREG_FCFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the completion pointed to by context3
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pmbox_done = (struct completion *)pmboxq->context3;
	if (pmbox_done)
		complete(pmbox_done);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
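/*
 * Illustrative sketch (assumption, not driver code): the completion stored
 * in pmboxq->context3 is the other half of a wait/wake pair.  The issuing
 * side follows the usual kernel completion pattern, roughly:
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->context3 = &mbox_done;
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_for_completion_timeout(&mbox_done, timeout);
 *
 * lpfc_sli_issue_mbox_wait() implements this; the snippet above (including
 * the 'timeout' value) is only a sketch of the idea.
 */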
static void
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
		spin_lock_irqsave(&vport->phba->ndlp_lock, iflags);
		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
		spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags);
	}
	ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue an UNREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after node is destroyed or node
	 * is in re-discovery driver need to cleanup the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
		lpfc_nlp_put(ndlp);
		pmb->ctx_buf = NULL;
		pmb->ctx_ndlp = NULL;
	}

	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;

		/* Check to see if there are any deferred events to process */
		if (ndlp) {
			lpfc_printf_vlog(
				vport,
				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
				"1438 UNREG cmpl deferred mbox x%x "
				"on NPort x%x Data: x%x x%x %px\n",
				ndlp->nlp_rpi, ndlp->nlp_DID,
				ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp);

			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
				ndlp->nlp_flag &= ~NLP_UNREG_INP;
				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
			} else {
				__lpfc_sli_rpi_release(vport, ndlp);
			}
			if (vport->load_flag & FC_UNLOADING)
				lpfc_nlp_put(ndlp);
			pmb->ctx_ndlp = NULL;
		}
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes, this routine puts the
 * reference back.
 *
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->ctx_ndlp;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
		     &phba->sli4_hba.sli_intf) >=
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(
					vport, KERN_INFO, LOG_MBOX | LOG_SLI,
					"0010 UNREG_LOGIN vpi:%x "
					"rpi:%x DID:%x defer x%x flg x%x "
					"map:%x %px\n",
					vport->vpi, ndlp->nlp_rpi,
					ndlp->nlp_DID, ndlp->nlp_defer_did,
					ndlp->nlp_flag,
					ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				lpfc_nlp_put(ndlp);

				/* Check to see if there are any deferred
				 * events to process
				 */
				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
				    (ndlp->nlp_defer_did !=
				    NLP_EVT_NOTHING_PENDING)) {
					lpfc_printf_vlog(
						vport, KERN_INFO, LOG_DISCOVERY,
						"4111 UNREG cmpl deferred "
						"clr x%x on "
						"NPort x%x Data: x%x x%px\n",
						ndlp->nlp_rpi, ndlp->nlp_DID,
						ndlp->nlp_defer_did, ndlp);
					ndlp->nlp_flag &= ~NLP_UNREG_INP;
					ndlp->nlp_defer_did =
						NLP_EVT_NOTHING_PENDING;
					lpfc_issue_els_plogi(
						vport, ndlp->nlp_DID, 0);
				} else {
					__lpfc_sli_rpi_release(vport, ndlp);
				}
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to upper layers. The interrupt
 * service routine processes mailbox completion interrupt and adds completed
 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
 * function returns the mailbox commands to the upper layer by calling the
 * completion handler function of each mailbox.
 **/
2658 lpfc_sli_handle_mb_event(struct lpfc_hba
*phba
)
2665 phba
->sli
.slistat
.mbox_event
++;
	/* Get all completed mailbox buffers into the cmplq */
2668 spin_lock_irq(&phba
->hbalock
);
2669 list_splice_init(&phba
->sli
.mboxq_cmpl
, &cmplq
);
2670 spin_unlock_irq(&phba
->hbalock
);
2672 /* Get a Mailbox buffer to setup mailbox commands for callback */
2674 list_remove_head(&cmplq
, pmb
, LPFC_MBOXQ_t
, list
);
2680 if (pmbox
->mbxCommand
!= MBX_HEARTBEAT
) {
2682 lpfc_debugfs_disc_trc(pmb
->vport
,
2683 LPFC_DISC_TRC_MBOX_VPORT
,
2684 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2685 (uint32_t)pmbox
->mbxCommand
,
2686 pmbox
->un
.varWords
[0],
2687 pmbox
->un
.varWords
[1]);
2690 lpfc_debugfs_disc_trc(phba
->pport
,
2692 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2693 (uint32_t)pmbox
->mbxCommand
,
2694 pmbox
->un
.varWords
[0],
2695 pmbox
->un
.varWords
[1]);
2700 * It is a fatal error if unknown mbox command completion.
2702 if (lpfc_sli_chk_mbx_command(pmbox
->mbxCommand
) ==
2704 /* Unknown mailbox command compl */
2705 lpfc_printf_log(phba
, KERN_ERR
, LOG_MBOX
| LOG_SLI
,
2706 "(%d):0323 Unknown Mailbox command "
2707 "x%x (x%x/x%x) Cmpl\n",
2708 pmb
->vport
? pmb
->vport
->vpi
:
2711 lpfc_sli_config_mbox_subsys_get(phba
,
2713 lpfc_sli_config_mbox_opcode_get(phba
,
2715 phba
->link_state
= LPFC_HBA_ERROR
;
2716 phba
->work_hs
= HS_FFER3
;
2717 lpfc_handle_eratt(phba
);
2721 if (pmbox
->mbxStatus
) {
2722 phba
->sli
.slistat
.mbox_stat_err
++;
2723 if (pmbox
->mbxStatus
== MBXERR_NO_RESOURCES
) {
2724 /* Mbox cmd cmpl error - RETRYing */
2725 lpfc_printf_log(phba
, KERN_INFO
,
2727 "(%d):0305 Mbox cmd cmpl "
2728 "error - RETRYing Data: x%x "
2729 "(x%x/x%x) x%x x%x x%x\n",
2730 pmb
->vport
? pmb
->vport
->vpi
:
2733 lpfc_sli_config_mbox_subsys_get(phba
,
2735 lpfc_sli_config_mbox_opcode_get(phba
,
2738 pmbox
->un
.varWords
[0],
2739 pmb
->vport
? pmb
->vport
->port_state
:
2740 LPFC_VPORT_UNKNOWN
);
2741 pmbox
->mbxStatus
= 0;
2742 pmbox
->mbxOwner
= OWN_HOST
;
2743 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
2744 if (rc
!= MBX_NOT_FINISHED
)
2749 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2750 lpfc_printf_log(phba
, KERN_INFO
, LOG_MBOX
| LOG_SLI
,
2751 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2752 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2754 pmb
->vport
? pmb
->vport
->vpi
: 0,
2756 lpfc_sli_config_mbox_subsys_get(phba
, pmb
),
2757 lpfc_sli_config_mbox_opcode_get(phba
, pmb
),
2759 *((uint32_t *) pmbox
),
2760 pmbox
->un
.varWords
[0],
2761 pmbox
->un
.varWords
[1],
2762 pmbox
->un
.varWords
[2],
2763 pmbox
->un
.varWords
[3],
2764 pmbox
->un
.varWords
[4],
2765 pmbox
->un
.varWords
[5],
2766 pmbox
->un
.varWords
[6],
2767 pmbox
->un
.varWords
[7],
2768 pmbox
->un
.varWords
[8],
2769 pmbox
->un
.varWords
[9],
2770 pmbox
->un
.varWords
[10]);
2773 pmb
->mbox_cmpl(phba
,pmb
);
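/*
 * Illustrative note (hedged): the mailbox completion path is split in two.
 * The interrupt handler only moves a finished mailbox onto
 * phba->sli.mboxq_cmpl and wakes the worker; the worker thread then runs
 * lpfc_sli_handle_mb_event() above, which drains that list in process
 * context and invokes each pmb->mbox_cmpl() callback, retrying commands that
 * completed with MBXERR_NO_RESOURCES.  Stripped down, the drain loop is:
 *
 *	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);   (under hbalock)
 *	while (!list_empty(&cmplq)) {
 *		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
 *		...
 *		pmb->mbox_cmpl(phba, pmb);
 *	}
 */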
/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When QUE_BUFTAG_BIT bit
 * is set in the tag the buffer is posted for a particular exchange,
 * the function will return the buffer without replacing the buffer.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}
/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	switch (fch_type) {
	case FC_TYPE_NVME:
		lpfc_nvmet_unsol_ls_event(phba, pring, saveq);
		return 1;
	default:
		break;
	}

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
									saveq);
		return 1;
	}

	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object otherwise
 * upper layer functions will free the iocb objects.
 **/
2868 lpfc_sli_process_unsol_iocb(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
2869 struct lpfc_iocbq
*saveq
)
2873 uint32_t Rctl
, Type
;
2874 struct lpfc_iocbq
*iocbq
;
2875 struct lpfc_dmabuf
*dmzbuf
;
2877 irsp
= &(saveq
->iocb
);
2879 if (irsp
->ulpCommand
== CMD_ASYNC_STATUS
) {
2880 if (pring
->lpfc_sli_rcv_async_status
)
2881 pring
->lpfc_sli_rcv_async_status(phba
, pring
, saveq
);
2883 lpfc_printf_log(phba
,
2886 "0316 Ring %d handler: unexpected "
2887 "ASYNC_STATUS iocb received evt_code "
2890 irsp
->un
.asyncstat
.evt_code
);
2894 if ((irsp
->ulpCommand
== CMD_IOCB_RET_XRI64_CX
) &&
2895 (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
)) {
2896 if (irsp
->ulpBdeCount
> 0) {
2897 dmzbuf
= lpfc_sli_get_buff(phba
, pring
,
2898 irsp
->un
.ulpWord
[3]);
2899 lpfc_in_buf_free(phba
, dmzbuf
);
2902 if (irsp
->ulpBdeCount
> 1) {
2903 dmzbuf
= lpfc_sli_get_buff(phba
, pring
,
2904 irsp
->unsli3
.sli3Words
[3]);
2905 lpfc_in_buf_free(phba
, dmzbuf
);
2908 if (irsp
->ulpBdeCount
> 2) {
2909 dmzbuf
= lpfc_sli_get_buff(phba
, pring
,
2910 irsp
->unsli3
.sli3Words
[7]);
2911 lpfc_in_buf_free(phba
, dmzbuf
);
2917 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
2918 if (irsp
->ulpBdeCount
!= 0) {
2919 saveq
->context2
= lpfc_sli_get_buff(phba
, pring
,
2920 irsp
->un
.ulpWord
[3]);
2921 if (!saveq
->context2
)
2922 lpfc_printf_log(phba
,
2925 "0341 Ring %d Cannot find buffer for "
2926 "an unsolicited iocb. tag 0x%x\n",
2928 irsp
->un
.ulpWord
[3]);
2930 if (irsp
->ulpBdeCount
== 2) {
2931 saveq
->context3
= lpfc_sli_get_buff(phba
, pring
,
2932 irsp
->unsli3
.sli3Words
[7]);
2933 if (!saveq
->context3
)
2934 lpfc_printf_log(phba
,
2937 "0342 Ring %d Cannot find buffer for an"
2938 " unsolicited iocb. tag 0x%x\n",
2940 irsp
->unsli3
.sli3Words
[7]);
2942 list_for_each_entry(iocbq
, &saveq
->list
, list
) {
2943 irsp
= &(iocbq
->iocb
);
2944 if (irsp
->ulpBdeCount
!= 0) {
2945 iocbq
->context2
= lpfc_sli_get_buff(phba
, pring
,
2946 irsp
->un
.ulpWord
[3]);
2947 if (!iocbq
->context2
)
2948 lpfc_printf_log(phba
,
2951 "0343 Ring %d Cannot find "
2952 "buffer for an unsolicited iocb"
2953 ". tag 0x%x\n", pring
->ringno
,
2954 irsp
->un
.ulpWord
[3]);
2956 if (irsp
->ulpBdeCount
== 2) {
2957 iocbq
->context3
= lpfc_sli_get_buff(phba
, pring
,
2958 irsp
->unsli3
.sli3Words
[7]);
2959 if (!iocbq
->context3
)
2960 lpfc_printf_log(phba
,
2963 "0344 Ring %d Cannot find "
2964 "buffer for an unsolicited "
2967 irsp
->unsli3
.sli3Words
[7]);
2971 if (irsp
->ulpBdeCount
!= 0 &&
2972 (irsp
->ulpCommand
== CMD_IOCB_RCV_CONT64_CX
||
2973 irsp
->ulpStatus
== IOSTAT_INTERMED_RSP
)) {
2976 /* search continue save q for same XRI */
2977 list_for_each_entry(iocbq
, &pring
->iocb_continue_saveq
, clist
) {
2978 if (iocbq
->iocb
.unsli3
.rcvsli3
.ox_id
==
2979 saveq
->iocb
.unsli3
.rcvsli3
.ox_id
) {
2980 list_add_tail(&saveq
->list
, &iocbq
->list
);
2986 list_add_tail(&saveq
->clist
,
2987 &pring
->iocb_continue_saveq
);
2988 if (saveq
->iocb
.ulpStatus
!= IOSTAT_INTERMED_RSP
) {
2989 list_del_init(&iocbq
->clist
);
2991 irsp
= &(saveq
->iocb
);
2995 if ((irsp
->ulpCommand
== CMD_RCV_ELS_REQ64_CX
) ||
2996 (irsp
->ulpCommand
== CMD_RCV_ELS_REQ_CX
) ||
2997 (irsp
->ulpCommand
== CMD_IOCB_RCV_ELS64_CX
)) {
2998 Rctl
= FC_RCTL_ELS_REQ
;
3001 w5p
= (WORD5
*)&(saveq
->iocb
.un
.ulpWord
[5]);
3002 Rctl
= w5p
->hcsw
.Rctl
;
3003 Type
= w5p
->hcsw
.Type
;
3005 /* Firmware Workaround */
3006 if ((Rctl
== 0) && (pring
->ringno
== LPFC_ELS_RING
) &&
3007 (irsp
->ulpCommand
== CMD_RCV_SEQUENCE64_CX
||
3008 irsp
->ulpCommand
== CMD_IOCB_RCV_SEQ64_CX
)) {
3009 Rctl
= FC_RCTL_ELS_REQ
;
3011 w5p
->hcsw
.Rctl
= Rctl
;
3012 w5p
->hcsw
.Type
= Type
;
3016 if (!lpfc_complete_unsol_iocb(phba
, pring
, saveq
, Rctl
, Type
))
3017 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
3018 "0313 Ring %d handler: unexpected Rctl x%x "
3019 "Type x%x received\n",
3020 pring
->ringno
, Rctl
, Type
);
/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. The function acquires the hbalock for SLI3 ports or the
 * ring_lock for SLI4 ports while searching the table.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;
	spinlock_t *temp_lock = NULL;
	unsigned long iflag = 0;

	if (phba->sli_rev == LPFC_SLI_REV4)
		temp_lock = &pring->ring_lock;
	else
		temp_lock = &phba->hbalock;

	spin_lock_irqsave(temp_lock, iflag);
	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			pring->txcmplq_cnt--;
			spin_unlock_irqrestore(temp_lock, iflag);
			return cmd_iocb;
		}
	}

	spin_unlock_irqrestore(temp_lock, iflag);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
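/*
 * Illustrative sketch (not driver code): a completion handler uses the
 * response iotag to recover the originating command before invoking its
 * callback, roughly:
 *
 *	cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring, &rspiocbq);
 *	if (cmdiocbq && cmdiocbq->iocb_cmpl)
 *		(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &rspiocbq);
 *
 * which is the pattern used by lpfc_sli_handle_fast_ring_event() and
 * lpfc_sli_process_sol_iocb() later in this file.
 */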
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag. The driver calls this function with
 * the ring lock held because this function is an SLI4 port only helper.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	spinlock_t *temp_lock = NULL;
	unsigned long iflag = 0;

	if (phba->sli_rev == LPFC_SLI_REV4)
		temp_lock = &pring->ring_lock;
	else
		temp_lock = &phba->hbalock;

	spin_lock_irqsave(temp_lock, iflag);
	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			pring->txcmplq_cnt--;
			spin_unlock_irqrestore(temp_lock, iflag);
			return cmd_iocb;
		}
	}

	spin_unlock_irqrestore(temp_lock, iflag);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x lookup error: max iotag (x%x) "
			"iocb_flag x%x\n",
			iotag, phba->sli.last_iotag,
			cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
	return NULL;
}
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
3142 lpfc_sli_process_sol_iocb(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
3143 struct lpfc_iocbq
*saveq
)
3145 struct lpfc_iocbq
*cmdiocbp
;
3147 unsigned long iflag
;
3149 cmdiocbp
= lpfc_sli_iocbq_lookup(phba
, pring
, saveq
);
3151 if (cmdiocbp
->iocb_cmpl
) {
3153 * If an ELS command failed send an event to mgmt
3156 if (saveq
->iocb
.ulpStatus
&&
3157 (pring
->ringno
== LPFC_ELS_RING
) &&
3158 (cmdiocbp
->iocb
.ulpCommand
==
3159 CMD_ELS_REQUEST64_CR
))
3160 lpfc_send_els_failure_event(phba
,
3164 * Post all ELS completions to the worker thread.
3165 * All other are passed to the completion callback.
3167 if (pring
->ringno
== LPFC_ELS_RING
) {
3168 if ((phba
->sli_rev
< LPFC_SLI_REV4
) &&
3169 (cmdiocbp
->iocb_flag
&
3170 LPFC_DRIVER_ABORTED
)) {
3171 spin_lock_irqsave(&phba
->hbalock
,
3173 cmdiocbp
->iocb_flag
&=
3174 ~LPFC_DRIVER_ABORTED
;
3175 spin_unlock_irqrestore(&phba
->hbalock
,
3177 saveq
->iocb
.ulpStatus
=
3178 IOSTAT_LOCAL_REJECT
;
3179 saveq
->iocb
.un
.ulpWord
[4] =
3182 /* Firmware could still be in progress
3183 * of DMAing payload, so don't free data
3184 * buffer till after a hbeat.
3186 spin_lock_irqsave(&phba
->hbalock
,
3188 saveq
->iocb_flag
|= LPFC_DELAY_MEM_FREE
;
3189 spin_unlock_irqrestore(&phba
->hbalock
,
3192 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
3193 if (saveq
->iocb_flag
&
3194 LPFC_EXCHANGE_BUSY
) {
3195 /* Set cmdiocb flag for the
3196 * exchange busy so sgl (xri)
3197 * will not be released until
3198 * the abort xri is received
3202 &phba
->hbalock
, iflag
);
3203 cmdiocbp
->iocb_flag
|=
3205 spin_unlock_irqrestore(
3206 &phba
->hbalock
, iflag
);
3208 if (cmdiocbp
->iocb_flag
&
3209 LPFC_DRIVER_ABORTED
) {
3211 * Clear LPFC_DRIVER_ABORTED
3212 * bit in case it was driver
3216 &phba
->hbalock
, iflag
);
3217 cmdiocbp
->iocb_flag
&=
3218 ~LPFC_DRIVER_ABORTED
;
3219 spin_unlock_irqrestore(
3220 &phba
->hbalock
, iflag
);
3221 cmdiocbp
->iocb
.ulpStatus
=
3222 IOSTAT_LOCAL_REJECT
;
3223 cmdiocbp
->iocb
.un
.ulpWord
[4] =
3224 IOERR_ABORT_REQUESTED
;
3226 * For SLI4, irsiocb contains
3227 * NO_XRI in sli_xritag, it
3228 * shall not affect releasing
3229 * sgl (xri) process.
3231 saveq
->iocb
.ulpStatus
=
3232 IOSTAT_LOCAL_REJECT
;
3233 saveq
->iocb
.un
.ulpWord
[4] =
3236 &phba
->hbalock
, iflag
);
3238 LPFC_DELAY_MEM_FREE
;
3239 spin_unlock_irqrestore(
3240 &phba
->hbalock
, iflag
);
3244 (cmdiocbp
->iocb_cmpl
) (phba
, cmdiocbp
, saveq
);
3246 lpfc_sli_release_iocbq(phba
, cmdiocbp
);
3249 * Unknown initiating command based on the response iotag.
3250 * This could be the case on the ELS ring because of
3253 if (pring
->ringno
!= LPFC_ELS_RING
) {
3255 * Ring <ringno> handler: unexpected completion IoTag
3258 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
3259 "0322 Ring %d handler: "
3260 "unexpected completion IoTag x%x "
3261 "Data: x%x x%x x%x x%x\n",
3263 saveq
->iocb
.ulpIoTag
,
3264 saveq
->iocb
.ulpStatus
,
3265 saveq
->iocb
.un
.ulpWord
[4],
3266 saveq
->iocb
.ulpCommand
,
3267 saveq
->iocb
.ulpContext
);
/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when
 * put pointer is ahead of the get pointer for a ring. This function signals
 * an error attention condition to the worker thread and the worker
 * thread will transition the HBA to offline state.
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->sli.sli3.numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);

	return;
}
/**
 * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @t: Pointer to the timer embedded in the HBA context object.
 *
 * This function is invoked by the Error Attention polling timer when the
 * timer times out. It will check the SLI Error Attention register for
 * possible attention events. If so, it will post an Error Attention event
 * and wake up worker thread to process it. Otherwise, it will set up the
 * Error Attention polling timer for the next poll.
 **/
void lpfc_poll_eratt(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;
	uint64_t sli_intr, cnt;

	phba = from_timer(phba, t, eratt_poll);

	/* Here we will also keep track of interrupts per sec of the hba */
	sli_intr = phba->sli.slistat.sli_intr;

	if (phba->sli.slistat.sli_prev_intr > sli_intr)
		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
			sli_intr);
	else
		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);

	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
	do_div(cnt, phba->eratt_poll_interval);
	phba->sli.slistat.sli_ips = cnt;

	phba->sli.slistat.sli_prev_intr = sli_intr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll,
			  jiffies +
			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
	return;
}
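/*
 * Worked example (hedged, illustrative values only): with an
 * eratt_poll_interval of 5 seconds and an interrupt counter that moved from
 * 1000 to 26000 between two polls,
 *
 *	cnt = 26000 - 1000 = 25000;
 *	do_div(cnt, 5);            results in sli_ips = 5000 interrupts/sec
 *
 * The wrap-around branch above covers the unlikely case where the 64-bit
 * counter overflows between two polls.
 */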
/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the interrupt context when there is a ring
 * event for the fcp ring. The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
 * LE bit set. The function will call the completion handler of the command
 * iocb if the response iocb indicates a completion for a command iocb or it
 * is an abort completion. The function will call lpfc_sli_process_unsol_iocb
 * function if this is an unsolicited iocb.
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 **/
3378 lpfc_sli_handle_fast_ring_event(struct lpfc_hba
*phba
,
3379 struct lpfc_sli_ring
*pring
, uint32_t mask
)
3381 struct lpfc_pgp
*pgp
= &phba
->port_gp
[pring
->ringno
];
3382 IOCB_t
*irsp
= NULL
;
3383 IOCB_t
*entry
= NULL
;
3384 struct lpfc_iocbq
*cmdiocbq
= NULL
;
3385 struct lpfc_iocbq rspiocbq
;
3387 uint32_t portRspPut
, portRspMax
;
3389 lpfc_iocb_type type
;
3390 unsigned long iflag
;
3391 uint32_t rsp_cmpl
= 0;
3393 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3394 pring
->stats
.iocb_event
++;
3397 * The next available response entry should never exceed the maximum
3398 * entries. If it does, treat it as an adapter hardware error.
3400 portRspMax
= pring
->sli
.sli3
.numRiocb
;
3401 portRspPut
= le32_to_cpu(pgp
->rspPutInx
);
3402 if (unlikely(portRspPut
>= portRspMax
)) {
3403 lpfc_sli_rsp_pointers_error(phba
, pring
);
3404 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3407 if (phba
->fcp_ring_in_use
) {
3408 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3411 phba
->fcp_ring_in_use
= 1;
3414 while (pring
->sli
.sli3
.rspidx
!= portRspPut
) {
3416 * Fetch an entry off the ring and copy it into a local data
3417 * structure. The copy involves a byte-swap since the
3418 * network byte order and pci byte orders are different.
3420 entry
= lpfc_resp_iocb(phba
, pring
);
3421 phba
->last_completion_time
= jiffies
;
3423 if (++pring
->sli
.sli3
.rspidx
>= portRspMax
)
3424 pring
->sli
.sli3
.rspidx
= 0;
3426 lpfc_sli_pcimem_bcopy((uint32_t *) entry
,
3427 (uint32_t *) &rspiocbq
.iocb
,
3428 phba
->iocb_rsp_size
);
3429 INIT_LIST_HEAD(&(rspiocbq
.list
));
3430 irsp
= &rspiocbq
.iocb
;
3432 type
= lpfc_sli_iocb_cmd_type(irsp
->ulpCommand
& CMD_IOCB_MASK
);
3433 pring
->stats
.iocb_rsp
++;
3436 if (unlikely(irsp
->ulpStatus
)) {
3438 * If resource errors reported from HBA, reduce
3439 * queuedepths of the SCSI device.
3441 if ((irsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
) &&
3442 ((irsp
->un
.ulpWord
[4] & IOERR_PARAM_MASK
) ==
3443 IOERR_NO_RESOURCES
)) {
3444 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3445 phba
->lpfc_rampdown_queue_depth(phba
);
3446 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3449 /* Rsp ring <ringno> error: IOCB */
3450 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
3451 "0336 Rsp Ring %d error: IOCB Data: "
3452 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3454 irsp
->un
.ulpWord
[0],
3455 irsp
->un
.ulpWord
[1],
3456 irsp
->un
.ulpWord
[2],
3457 irsp
->un
.ulpWord
[3],
3458 irsp
->un
.ulpWord
[4],
3459 irsp
->un
.ulpWord
[5],
3460 *(uint32_t *)&irsp
->un1
,
3461 *((uint32_t *)&irsp
->un1
+ 1));
3465 case LPFC_ABORT_IOCB
:
3468 * Idle exchange closed via ABTS from port. No iocb
3469 * resources need to be recovered.
3471 if (unlikely(irsp
->ulpCommand
== CMD_XRI_ABORTED_CX
)) {
3472 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
3473 "0333 IOCB cmd 0x%x"
3474 " processed. Skipping"
3480 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3481 cmdiocbq
= lpfc_sli_iocbq_lookup(phba
, pring
,
3483 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3484 if (unlikely(!cmdiocbq
))
3486 if (cmdiocbq
->iocb_flag
& LPFC_DRIVER_ABORTED
)
3487 cmdiocbq
->iocb_flag
&= ~LPFC_DRIVER_ABORTED
;
3488 if (cmdiocbq
->iocb_cmpl
) {
3489 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3490 (cmdiocbq
->iocb_cmpl
)(phba
, cmdiocbq
,
3492 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3495 case LPFC_UNSOL_IOCB
:
3496 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3497 lpfc_sli_process_unsol_iocb(phba
, pring
, &rspiocbq
);
3498 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3501 if (irsp
->ulpCommand
== CMD_ADAPTER_MSG
) {
3502 char adaptermsg
[LPFC_MAX_ADPTMSG
];
3503 memset(adaptermsg
, 0, LPFC_MAX_ADPTMSG
);
3504 memcpy(&adaptermsg
[0], (uint8_t *) irsp
,
3506 dev_warn(&((phba
->pcidev
)->dev
),
3508 phba
->brd_no
, adaptermsg
);
3510 /* Unknown IOCB command */
3511 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
3512 "0334 Unknown IOCB command "
3513 "Data: x%x, x%x x%x x%x x%x\n",
3514 type
, irsp
->ulpCommand
,
3523 * The response IOCB has been processed. Update the ring
3524 * pointer in SLIM. If the port response put pointer has not
3525 * been updated, sync the pgp->rspPutInx and fetch the new port
3526 * response put pointer.
3528 writel(pring
->sli
.sli3
.rspidx
,
3529 &phba
->host_gp
[pring
->ringno
].rspGetInx
);
3531 if (pring
->sli
.sli3
.rspidx
== portRspPut
)
3532 portRspPut
= le32_to_cpu(pgp
->rspPutInx
);
3535 if ((rsp_cmpl
> 0) && (mask
& HA_R0RE_REQ
)) {
3536 pring
->stats
.iocb_rsp_full
++;
3537 status
= ((CA_R0ATT
| CA_R0RE_RSP
) << (pring
->ringno
* 4));
3538 writel(status
, phba
->CAregaddr
);
3539 readl(phba
->CAregaddr
);
3541 if ((mask
& HA_R0CE_RSP
) && (pring
->flag
& LPFC_CALL_RING_AVAILABLE
)) {
3542 pring
->flag
&= ~LPFC_CALL_RING_AVAILABLE
;
3543 pring
->stats
.iocb_cmd_empty
++;
3545 /* Force update of the local copy of cmdGetInx */
3546 pring
->sli
.sli3
.local_getidx
= le32_to_cpu(pgp
->cmdGetInx
);
3547 lpfc_sli_resume_iocb(phba
, pring
);
3549 if ((pring
->lpfc_sli_cmd_available
))
3550 (pring
->lpfc_sli_cmd_available
) (phba
, pring
);
3554 phba
->fcp_ring_in_use
= 0;
3555 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
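/*
 * Illustrative sketch (not driver code): the loop above consumes the SLI3
 * response ring with the classic producer/consumer index pair.  Stripped of
 * error handling it reduces to:
 *
 *	while (pring->sli.sli3.rspidx != portRspPut) {
 *		entry = lpfc_resp_iocb(phba, pring);
 *		if (++pring->sli.sli3.rspidx >= portRspMax)
 *			pring->sli.sli3.rspidx = 0;
 *		lpfc_sli_pcimem_bcopy(entry, &rspiocbq.iocb,
 *				      phba->iocb_rsp_size);
 *		... dispatch on lpfc_sli_iocb_cmd_type() ...
 *	}
 *
 * The local get index is written back to SLIM so the port can reuse the
 * consumed entries.
 */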
/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
3577 static struct lpfc_iocbq
*
3578 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
3579 struct lpfc_iocbq
*rspiocbp
)
3581 struct lpfc_iocbq
*saveq
;
3582 struct lpfc_iocbq
*cmdiocbp
;
3583 struct lpfc_iocbq
*next_iocb
;
3584 IOCB_t
*irsp
= NULL
;
3585 uint32_t free_saveq
;
3586 uint8_t iocb_cmd_type
;
3587 lpfc_iocb_type type
;
3588 unsigned long iflag
;
3591 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3592 /* First add the response iocb to the countinueq list */
3593 list_add_tail(&rspiocbp
->list
, &(pring
->iocb_continueq
));
3594 pring
->iocb_continueq_cnt
++;
3596 /* Now, determine whether the list is completed for processing */
3597 irsp
= &rspiocbp
->iocb
;
3600 * By default, the driver expects to free all resources
3601 * associated with this iocb completion.
3604 saveq
= list_get_first(&pring
->iocb_continueq
,
3605 struct lpfc_iocbq
, list
);
3606 irsp
= &(saveq
->iocb
);
3607 list_del_init(&pring
->iocb_continueq
);
3608 pring
->iocb_continueq_cnt
= 0;
3610 pring
->stats
.iocb_rsp
++;
3613 * If resource errors reported from HBA, reduce
3614 * queuedepths of the SCSI device.
3616 if ((irsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
) &&
3617 ((irsp
->un
.ulpWord
[4] & IOERR_PARAM_MASK
) ==
3618 IOERR_NO_RESOURCES
)) {
3619 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3620 phba
->lpfc_rampdown_queue_depth(phba
);
3621 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3624 if (irsp
->ulpStatus
) {
3625 /* Rsp ring <ringno> error: IOCB */
3626 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
3627 "0328 Rsp Ring %d error: "
3632 "x%x x%x x%x x%x\n",
3634 irsp
->un
.ulpWord
[0],
3635 irsp
->un
.ulpWord
[1],
3636 irsp
->un
.ulpWord
[2],
3637 irsp
->un
.ulpWord
[3],
3638 irsp
->un
.ulpWord
[4],
3639 irsp
->un
.ulpWord
[5],
3640 *(((uint32_t *) irsp
) + 6),
3641 *(((uint32_t *) irsp
) + 7),
3642 *(((uint32_t *) irsp
) + 8),
3643 *(((uint32_t *) irsp
) + 9),
3644 *(((uint32_t *) irsp
) + 10),
3645 *(((uint32_t *) irsp
) + 11),
3646 *(((uint32_t *) irsp
) + 12),
3647 *(((uint32_t *) irsp
) + 13),
3648 *(((uint32_t *) irsp
) + 14),
3649 *(((uint32_t *) irsp
) + 15));
3653 * Fetch the IOCB command type and call the correct completion
3654 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3655 * get freed back to the lpfc_iocb_list by the discovery
3658 iocb_cmd_type
= irsp
->ulpCommand
& CMD_IOCB_MASK
;
3659 type
= lpfc_sli_iocb_cmd_type(iocb_cmd_type
);
3662 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3663 rc
= lpfc_sli_process_sol_iocb(phba
, pring
, saveq
);
3664 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3667 case LPFC_UNSOL_IOCB
:
3668 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3669 rc
= lpfc_sli_process_unsol_iocb(phba
, pring
, saveq
);
3670 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3675 case LPFC_ABORT_IOCB
:
3677 if (irsp
->ulpCommand
!= CMD_XRI_ABORTED_CX
) {
3678 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3679 cmdiocbp
= lpfc_sli_iocbq_lookup(phba
, pring
,
3681 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3684 /* Call the specified completion routine */
3685 if (cmdiocbp
->iocb_cmpl
) {
3686 spin_unlock_irqrestore(&phba
->hbalock
,
3688 (cmdiocbp
->iocb_cmpl
)(phba
, cmdiocbp
,
3690 spin_lock_irqsave(&phba
->hbalock
,
3693 __lpfc_sli_release_iocbq(phba
,
3698 case LPFC_UNKNOWN_IOCB
:
3699 if (irsp
->ulpCommand
== CMD_ADAPTER_MSG
) {
3700 char adaptermsg
[LPFC_MAX_ADPTMSG
];
3701 memset(adaptermsg
, 0, LPFC_MAX_ADPTMSG
);
3702 memcpy(&adaptermsg
[0], (uint8_t *)irsp
,
3704 dev_warn(&((phba
->pcidev
)->dev
),
3706 phba
->brd_no
, adaptermsg
);
3708 /* Unknown IOCB command */
3709 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
3710 "0335 Unknown IOCB "
3711 "command Data: x%x "
3722 list_for_each_entry_safe(rspiocbp
, next_iocb
,
3723 &saveq
->list
, list
) {
3724 list_del_init(&rspiocbp
->list
);
3725 __lpfc_sli_release_iocbq(phba
, rspiocbp
);
3727 __lpfc_sli_release_iocbq(phba
, saveq
);
3731 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine wraps the actual slow_ring event process routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a ring event
 * for non-fcp rings. The caller does not hold any lock. The function will
 * remove each response iocb in the response ring and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
3763 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba
*phba
,
3764 struct lpfc_sli_ring
*pring
, uint32_t mask
)
3766 struct lpfc_pgp
*pgp
;
3768 IOCB_t
*irsp
= NULL
;
3769 struct lpfc_iocbq
*rspiocbp
= NULL
;
3770 uint32_t portRspPut
, portRspMax
;
3771 unsigned long iflag
;
3774 pgp
= &phba
->port_gp
[pring
->ringno
];
3775 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3776 pring
->stats
.iocb_event
++;
3779 * The next available response entry should never exceed the maximum
3780 * entries. If it does, treat it as an adapter hardware error.
3782 portRspMax
= pring
->sli
.sli3
.numRiocb
;
3783 portRspPut
= le32_to_cpu(pgp
->rspPutInx
);
3784 if (portRspPut
>= portRspMax
) {
3786 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3787 * rsp ring <portRspMax>
3789 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
3790 "0303 Ring %d handler: portRspPut %d "
3791 "is bigger than rsp ring %d\n",
3792 pring
->ringno
, portRspPut
, portRspMax
);
3794 phba
->link_state
= LPFC_HBA_ERROR
;
3795 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3797 phba
->work_hs
= HS_FFER3
;
3798 lpfc_handle_eratt(phba
);
3804 while (pring
->sli
.sli3
.rspidx
!= portRspPut
) {
3806 * Build a completion list and call the appropriate handler.
3807 * The process is to get the next available response iocb, get
3808 * a free iocb from the list, copy the response data into the
3809 * free iocb, insert to the continuation list, and update the
3810 * next response index to slim. This process makes response
3811 * iocb's in the ring available to DMA as fast as possible but
3812 * pays a penalty for a copy operation. Since the iocb is
3813 * only 32 bytes, this penalty is considered small relative to
3814 * the PCI reads for register values and a slim write. When
3815 * the ulpLe field is set, the entire Command has been
3818 entry
= lpfc_resp_iocb(phba
, pring
);
3820 phba
->last_completion_time
= jiffies
;
3821 rspiocbp
= __lpfc_sli_get_iocbq(phba
);
3822 if (rspiocbp
== NULL
) {
3823 printk(KERN_ERR
"%s: out of buffers! Failing "
3824 "completion.\n", __func__
);
3828 lpfc_sli_pcimem_bcopy(entry
, &rspiocbp
->iocb
,
3829 phba
->iocb_rsp_size
);
3830 irsp
= &rspiocbp
->iocb
;
3832 if (++pring
->sli
.sli3
.rspidx
>= portRspMax
)
3833 pring
->sli
.sli3
.rspidx
= 0;
3835 if (pring
->ringno
== LPFC_ELS_RING
) {
3836 lpfc_debugfs_slow_ring_trc(phba
,
3837 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
3838 *(((uint32_t *) irsp
) + 4),
3839 *(((uint32_t *) irsp
) + 6),
3840 *(((uint32_t *) irsp
) + 7));
3843 writel(pring
->sli
.sli3
.rspidx
,
3844 &phba
->host_gp
[pring
->ringno
].rspGetInx
);
3846 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3847 /* Handle the response IOCB */
3848 rspiocbp
= lpfc_sli_sp_handle_rspiocb(phba
, pring
, rspiocbp
);
3849 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3852 * If the port response put pointer has not been updated, sync
3853 * the pgp->rspPutInx in the MAILBOX_tand fetch the new port
3854 * response put pointer.
3856 if (pring
->sli
.sli3
.rspidx
== portRspPut
) {
3857 portRspPut
= le32_to_cpu(pgp
->rspPutInx
);
3859 } /* while (pring->sli.sli3.rspidx != portRspPut) */
3861 if ((rspiocbp
!= NULL
) && (mask
& HA_R0RE_REQ
)) {
3862 /* At least one response entry has been freed */
3863 pring
->stats
.iocb_rsp_full
++;
3864 /* SET RxRE_RSP in Chip Att register */
3865 status
= ((CA_R0ATT
| CA_R0RE_RSP
) << (pring
->ringno
* 4));
3866 writel(status
, phba
->CAregaddr
);
3867 readl(phba
->CAregaddr
); /* flush */
3869 if ((mask
& HA_R0CE_RSP
) && (pring
->flag
& LPFC_CALL_RING_AVAILABLE
)) {
3870 pring
->flag
&= ~LPFC_CALL_RING_AVAILABLE
;
3871 pring
->stats
.iocb_cmd_empty
++;
3873 /* Force update of the local copy of cmdGetInx */
3874 pring
->sli
.sli3
.local_getidx
= le32_to_cpu(pgp
->cmdGetInx
);
3875 lpfc_sli_resume_iocb(phba
, pring
);
3877 if ((pring
->lpfc_sli_cmd_available
))
3878 (pring
->lpfc_sli_cmd_available
) (phba
, pring
);
3882 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
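/*
 * Illustrative sketch (not driver code): the slow-path handler above treats
 * rspidx as a circular index, so the advance-and-wrap step is simply
 *
 *    if (++pring->sli.sli3.rspidx >= portRspMax)
 *        pring->sli.sli3.rspidx = 0;
 *
 * i.e. with a hypothetical portRspMax of 64, index 63 wraps back to 0.  The
 * real ring size always comes from pring->sli.sli3.numRiocb.
 */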
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function removes each
 * response iocb from the response worker queue and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
                   struct lpfc_sli_ring *pring, uint32_t mask)
{
    struct lpfc_iocbq *irspiocbq;
    struct hbq_dmabuf *dmabuf;
    struct lpfc_cq_event *cq_event;
    unsigned long iflag;
    int count = 0;

    spin_lock_irqsave(&phba->hbalock, iflag);
    phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
    spin_unlock_irqrestore(&phba->hbalock, iflag);
    while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
        /* Get the response iocb from the head of work queue */
        spin_lock_irqsave(&phba->hbalock, iflag);
        list_remove_head(&phba->sli4_hba.sp_queue_event,
                 cq_event, struct lpfc_cq_event, list);
        spin_unlock_irqrestore(&phba->hbalock, iflag);

        switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
        case CQE_CODE_COMPL_WQE:
            irspiocbq = container_of(cq_event, struct lpfc_iocbq,
                         cq_event);
            /* Translate ELS WCQE to response IOCBQ */
            irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
                                   irspiocbq);
            if (irspiocbq)
                lpfc_sli_sp_handle_rspiocb(phba, pring,
                               irspiocbq);
            count++;
            break;
        case CQE_CODE_RECEIVE:
        case CQE_CODE_RECEIVE_V1:
            dmabuf = container_of(cq_event, struct hbq_dmabuf,
                          cq_event);
            lpfc_sli4_handle_received_buffer(phba, dmabuf);
            count++;
            break;
        default:
            break;
        }

        /* Limit the number of events to 64 to avoid soft lockups */
        if (count == 64)
            break;
    }
}
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
    LIST_HEAD(completions);
    struct lpfc_iocbq *iocb, *next_iocb;

    if (pring->ringno == LPFC_ELS_RING) {
        lpfc_fabric_abort_hba(phba);
    }

    /* Error everything on txq and txcmplq
     * First do the txq.
     */
    if (phba->sli_rev >= LPFC_SLI_REV4) {
        spin_lock_irq(&pring->ring_lock);
        list_splice_init(&pring->txq, &completions);
        spin_unlock_irq(&pring->ring_lock);

        spin_lock_irq(&phba->hbalock);
        /* Next issue ABTS for everything on the txcmplq */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
            lpfc_sli_issue_abort_iotag(phba, pring, iocb);
        spin_unlock_irq(&phba->hbalock);
    } else {
        spin_lock_irq(&phba->hbalock);
        list_splice_init(&pring->txq, &completions);

        /* Next issue ABTS for everything on the txcmplq */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
            lpfc_sli_issue_abort_iotag(phba, pring, iocb);
        spin_unlock_irq(&phba->hbalock);
    }

    /* Cancel all the IOCBs from the completions list */
    lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                  IOERR_SLI_ABORTED);
}
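/*
 * Illustrative note: lpfc_sli_abort_iocb_ring() treats the two lists
 * differently.  Commands still on txq were never issued, so they can simply
 * be spliced to a local list and completed with IOSTAT_LOCAL_REJECT, while
 * commands on txcmplq are outstanding on the wire, so an ABTS is issued per
 * iocb instead.  A minimal sketch of that split:
 *
 *    list_splice_init(&pring->txq, &completions);
 *    list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
 *        lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 */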
/**
 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
 * @phba: Pointer to HBA context object.
 *
 * This function aborts all iocbs in FCP rings and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
{
    struct lpfc_sli *psli = &phba->sli;
    struct lpfc_sli_ring *pring;
    uint32_t i;

    /* Look on all the FCP Rings for the iotag */
    if (phba->sli_rev >= LPFC_SLI_REV4) {
        for (i = 0; i < phba->cfg_hdw_queue; i++) {
            pring = phba->sli4_hba.hdwq[i].io_wq->pring;
            lpfc_sli_abort_iocb_ring(phba, pring);
        }
    } else {
        pring = &psli->sli3_ring[LPFC_FCP_RING];
        lpfc_sli_abort_iocb_ring(phba, pring);
    }
}
/**
 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all iocbs in the IO ring and frees all the iocb
 * objects in txq and txcmplq. This function will not issue abort iocbs
 * for the iocb commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
{
    LIST_HEAD(txq);
    LIST_HEAD(txcmplq);
    struct lpfc_sli *psli = &phba->sli;
    struct lpfc_sli_ring *pring;
    uint32_t i;
    struct lpfc_iocbq *piocb, *next_iocb;

    spin_lock_irq(&phba->hbalock);
    /* Indicate the I/O queues are flushed */
    phba->hba_flag |= HBA_IOQ_FLUSH;
    spin_unlock_irq(&phba->hbalock);

    /* Look on all the FCP Rings for the iotag */
    if (phba->sli_rev >= LPFC_SLI_REV4) {
        for (i = 0; i < phba->cfg_hdw_queue; i++) {
            pring = phba->sli4_hba.hdwq[i].io_wq->pring;

            spin_lock_irq(&pring->ring_lock);
            /* Retrieve everything on txq */
            list_splice_init(&pring->txq, &txq);
            list_for_each_entry_safe(piocb, next_iocb,
                         &pring->txcmplq, list)
                piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
            /* Retrieve everything on the txcmplq */
            list_splice_init(&pring->txcmplq, &txcmplq);
            pring->txcmplq_cnt = 0;
            spin_unlock_irq(&pring->ring_lock);

            /* Flush the txq */
            lpfc_sli_cancel_iocbs(phba, &txq,
                          IOSTAT_LOCAL_REJECT,
                          IOERR_SLI_DOWN);
            /* Flush the txcmpq */
            lpfc_sli_cancel_iocbs(phba, &txcmplq,
                          IOSTAT_LOCAL_REJECT,
                          IOERR_SLI_DOWN);
        }
    } else {
        pring = &psli->sli3_ring[LPFC_FCP_RING];

        spin_lock_irq(&phba->hbalock);
        /* Retrieve everything on txq */
        list_splice_init(&pring->txq, &txq);
        list_for_each_entry_safe(piocb, next_iocb,
                     &pring->txcmplq, list)
            piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
        /* Retrieve everything on the txcmplq */
        list_splice_init(&pring->txcmplq, &txcmplq);
        pring->txcmplq_cnt = 0;
        spin_unlock_irq(&phba->hbalock);

        /* Flush the txq */
        lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
                      IOERR_SLI_DOWN);
        /* Flush the txcmpq */
        lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
                      IOERR_SLI_DOWN);
    }
}
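/*
 * Illustrative comparison, assuming the EEH/permanent-failure path described
 * in the kerneldoc above: lpfc_sli_flush_io_rings() differs from
 * lpfc_sli_abort_iocb_ring() in that it never sends ABTS for outstanding
 * commands; both txq and txcmplq entries are returned locally with
 * IOSTAT_LOCAL_REJECT / IOERR_SLI_DOWN because the PCI slot is assumed to be
 * gone and no further traffic can reach the port.
 */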
/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares
 * with the provided bit mask to check if the HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart, otherwise
 * it returns zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
    uint32_t status;
    int i = 0;
    int retval = 0;

    /* Read the HBA Host Status Register */
    if (lpfc_readl(phba->HSregaddr, &status))
        return 1;

    /*
     * Check status register every 100ms for 5 retries, then every
     * 500ms for 5, then every 2.5 sec for 5, then reset board and
     * every 2.5 sec for 4.
     * Break out of the loop if errors occurred during init.
     */
    while (((status & mask) != mask) &&
           !(status & HS_FFERM) &&
           i++ < 20) {

        if (i <= 5)
            msleep(10);
        else if (i <= 10)
            msleep(500);
        else
            msleep(2500);

        if (i == 15) {
            /* Do post */
            phba->pport->port_state = LPFC_VPORT_UNKNOWN;
            lpfc_sli_brdrestart(phba);
        }
        /* Read the HBA Host Status Register */
        if (lpfc_readl(phba->HSregaddr, &status)) {
            retval = 1;
            break;
        }
    }

    /* Check to see if any errors occurred during init */
    if ((status & HS_FFERM) || (i >= 20)) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2751 Adapter failed to restart, "
                "status reg x%x, FW Data: A8 x%x AC x%x\n",
                status,
                readl(phba->MBslimaddr + 0xa8),
                readl(phba->MBslimaddr + 0xac));
        phba->link_state = LPFC_HBA_ERROR;
        retval = 1;
    }

    return retval;
}
/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function checks the host status register to check if the HBA is
 * ready. This function will wait in a loop for the HBA to be ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to be ready,
 * otherwise it returns zero.
 **/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
    uint32_t status;
    int retval = 0;

    /* Read the HBA Host Status Register */
    status = lpfc_sli4_post_status_check(phba);

    if (status) {
        phba->pport->port_state = LPFC_VPORT_UNKNOWN;
        lpfc_sli_brdrestart(phba);
        status = lpfc_sli4_post_status_check(phba);
    }

    /* Check to see if any errors occurred during init */
    if (status) {
        phba->link_state = LPFC_HBA_ERROR;
        retval = 1;
    } else {
        phba->sli4_hba.intr_enable = 0;
    }

    return retval;
}
/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
 * from the API jump table function pointer in the lpfc_hba struct.
 **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
    return phba->lpfc_sli_brdready(phba, mask);
}
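/*
 * Illustrative sketch of the API jump table used by the wrapper above.
 * During driver setup (the actual assignments live elsewhere in the driver,
 * not in this file) the per-HBA function pointers are aimed at the SLI3 or
 * SLI4 variants, roughly:
 *
 *    phba->lpfc_sli_brdready   = lpfc_sli_brdready_s3;   // or _s4
 *    phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3; // or _s4
 *
 * so callers such as lpfc_sli_brdready() stay revision agnostic.
 */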
#define BARRIER_TEST_PATTERN (0xdeadbeef)

/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting an HBA. This function is called
 * with hbalock held and requests the HBA to quiesce DMAs before a reset.
 **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
    uint32_t __iomem *resp_buf;
    uint32_t __iomem *mbox_buf;
    volatile uint32_t mbox;
    uint32_t hc_copy, ha_copy, resp_data;
    int i;
    uint8_t hdrtype;

    lockdep_assert_held(&phba->hbalock);

    pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
    if (hdrtype != 0x80 ||
        (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
         FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
        return;

    /*
     * Tell the other part of the chip to suspend temporarily all
     * its DMA activity.
     */
    resp_buf = phba->MBslimaddr;

    /* Disable the error attention */
    if (lpfc_readl(phba->HCregaddr, &hc_copy))
        return;
    writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
    phba->link_flag |= LS_IGNORE_ERATT;

    if (lpfc_readl(phba->HAregaddr, &ha_copy))
        return;
    if (ha_copy & HA_ERATT) {
        /* Clear Chip error bit */
        writel(HA_ERATT, phba->HAregaddr);
        phba->pport->stopped = 1;
    }

    mbox = 0;
    ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
    ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

    writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
    mbox_buf = phba->MBslimaddr;
    writel(mbox, mbox_buf);

    for (i = 0; i < 50; i++) {
        if (lpfc_readl((resp_buf + 1), &resp_data))
            return;
        if (resp_data != ~(BARRIER_TEST_PATTERN))
            mdelay(1);
        else
            break;
    }
    resp_data = 0;
    if (lpfc_readl((resp_buf + 1), &resp_data))
        return;
    if (resp_data != ~(BARRIER_TEST_PATTERN)) {
        if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
            phba->pport->stopped)
            goto restore_hc;
        else
            goto clear_errat;
    }

    ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
    resp_data = 0;
    for (i = 0; i < 500; i++) {
        if (lpfc_readl(resp_buf, &resp_data))
            return;
        if (resp_data != mbox)
            mdelay(1);
        else
            break;
    }

clear_errat:

    while (++i < 500) {
        if (lpfc_readl(phba->HAregaddr, &ha_copy))
            return;
        if (!(ha_copy & HA_ERATT))
            mdelay(1);
        else
            break;
    }

    if (readl(phba->HAregaddr) & HA_ERATT) {
        writel(HA_ERATT, phba->HAregaddr);
        phba->pport->stopped = 1;
    }

restore_hc:
    phba->link_flag &= ~LS_IGNORE_ERATT;
    writel(hc_copy, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */
}
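/*
 * Illustrative walk-through of the barrier handshake above (the constants
 * are from the code, the sequencing description is a sketch): the host
 * writes BARRIER_TEST_PATTERN (0xdeadbeef) into the SLIM word at
 * resp_buf + 1 and posts a MBX_KILL_BOARD mailbox owned by the chip; the
 * chip acknowledges the DMA quiesce by writing back the one's complement
 * (~0xdeadbeef == 0x21524110), which the polling loops wait for before the
 * HC register and LS_IGNORE_ERATT state are restored.
 */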
4327 * lpfc_sli_brdkill - Issue a kill_board mailbox command
4328 * @phba: Pointer to HBA context object.
4330 * This function issues a kill_board mailbox command and waits for
4331 * the error attention interrupt. This function is called for stopping
4332 * the firmware processing. The caller is not required to hold any
4333 * locks. This function calls lpfc_hba_down_post function to free
4334 * any pending commands after the kill. The function will return 1 when it
4335 * fails to kill the board else will return 0.
4338 lpfc_sli_brdkill(struct lpfc_hba
*phba
)
4340 struct lpfc_sli
*psli
;
4350 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4351 "0329 Kill HBA Data: x%x x%x\n",
4352 phba
->pport
->port_state
, psli
->sli_flag
);
4354 pmb
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
4358 /* Disable the error attention */
4359 spin_lock_irq(&phba
->hbalock
);
4360 if (lpfc_readl(phba
->HCregaddr
, &status
)) {
4361 spin_unlock_irq(&phba
->hbalock
);
4362 mempool_free(pmb
, phba
->mbox_mem_pool
);
4365 status
&= ~HC_ERINT_ENA
;
4366 writel(status
, phba
->HCregaddr
);
4367 readl(phba
->HCregaddr
); /* flush */
4368 phba
->link_flag
|= LS_IGNORE_ERATT
;
4369 spin_unlock_irq(&phba
->hbalock
);
4371 lpfc_kill_board(phba
, pmb
);
4372 pmb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
4373 retval
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
4375 if (retval
!= MBX_SUCCESS
) {
4376 if (retval
!= MBX_BUSY
)
4377 mempool_free(pmb
, phba
->mbox_mem_pool
);
4378 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
4379 "2752 KILL_BOARD command failed retval %d\n",
4381 spin_lock_irq(&phba
->hbalock
);
4382 phba
->link_flag
&= ~LS_IGNORE_ERATT
;
4383 spin_unlock_irq(&phba
->hbalock
);
4387 spin_lock_irq(&phba
->hbalock
);
4388 psli
->sli_flag
&= ~LPFC_SLI_ACTIVE
;
4389 spin_unlock_irq(&phba
->hbalock
);
4391 mempool_free(pmb
, phba
->mbox_mem_pool
);
4393 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4394 * attention every 100ms for 3 seconds. If we don't get ERATT after
4395 * 3 seconds we still set HBA_ERROR state because the status of the
4396 * board is now undefined.
4398 if (lpfc_readl(phba
->HAregaddr
, &ha_copy
))
4400 while ((i
++ < 30) && !(ha_copy
& HA_ERATT
)) {
4402 if (lpfc_readl(phba
->HAregaddr
, &ha_copy
))
4406 del_timer_sync(&psli
->mbox_tmo
);
4407 if (ha_copy
& HA_ERATT
) {
4408 writel(HA_ERATT
, phba
->HAregaddr
);
4409 phba
->pport
->stopped
= 1;
4411 spin_lock_irq(&phba
->hbalock
);
4412 psli
->sli_flag
&= ~LPFC_SLI_MBOX_ACTIVE
;
4413 psli
->mbox_active
= NULL
;
4414 phba
->link_flag
&= ~LS_IGNORE_ERATT
;
4415 spin_unlock_irq(&phba
->hbalock
);
4417 lpfc_hba_down_post(phba
);
4418 phba
->link_state
= LPFC_HBA_ERROR
;
4420 return ha_copy
& HA_ERATT
? 0 : 1;
4424 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
4425 * @phba: Pointer to HBA context object.
4427 * This function resets the HBA by writing HC_INITFF to the control
4428 * register. After the HBA resets, this function resets all the iocb ring
4429 * indices. This function disables PCI layer parity checking during
4431 * This function returns 0 always.
4432 * The caller is not required to hold any locks.
4435 lpfc_sli_brdreset(struct lpfc_hba
*phba
)
4437 struct lpfc_sli
*psli
;
4438 struct lpfc_sli_ring
*pring
;
4445 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4446 "0325 Reset HBA Data: x%x x%x\n",
4447 (phba
->pport
) ? phba
->pport
->port_state
: 0,
4450 /* perform board reset */
4451 phba
->fc_eventTag
= 0;
4452 phba
->link_events
= 0;
4454 phba
->pport
->fc_myDID
= 0;
4455 phba
->pport
->fc_prevDID
= 0;
4458 /* Turn off parity checking and serr during the physical reset */
4459 if (pci_read_config_word(phba
->pcidev
, PCI_COMMAND
, &cfg_value
))
4462 pci_write_config_word(phba
->pcidev
, PCI_COMMAND
,
4464 ~(PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
)));
4466 psli
->sli_flag
&= ~(LPFC_SLI_ACTIVE
| LPFC_PROCESS_LA
);
4468 /* Now toggle INITFF bit in the Host Control Register */
4469 writel(HC_INITFF
, phba
->HCregaddr
);
4471 readl(phba
->HCregaddr
); /* flush */
4472 writel(0, phba
->HCregaddr
);
4473 readl(phba
->HCregaddr
); /* flush */
4475 /* Restore PCI cmd register */
4476 pci_write_config_word(phba
->pcidev
, PCI_COMMAND
, cfg_value
);
4478 /* Initialize relevant SLI info */
4479 for (i
= 0; i
< psli
->num_rings
; i
++) {
4480 pring
= &psli
->sli3_ring
[i
];
4482 pring
->sli
.sli3
.rspidx
= 0;
4483 pring
->sli
.sli3
.next_cmdidx
= 0;
4484 pring
->sli
.sli3
.local_getidx
= 0;
4485 pring
->sli
.sli3
.cmdidx
= 0;
4486 pring
->missbufcnt
= 0;
4489 phba
->link_state
= LPFC_WARM_START
;
4494 * lpfc_sli4_brdreset - Reset a sli-4 HBA
4495 * @phba: Pointer to HBA context object.
4497 * This function resets a SLI4 HBA. This function disables PCI layer parity
4498 * checking during resets the device. The caller is not required to hold
4501 * This function returns 0 on success else returns negative error code.
4504 lpfc_sli4_brdreset(struct lpfc_hba
*phba
)
4506 struct lpfc_sli
*psli
= &phba
->sli
;
4511 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4512 "0295 Reset HBA Data: x%x x%x x%x\n",
4513 phba
->pport
->port_state
, psli
->sli_flag
,
4516 /* perform board reset */
4517 phba
->fc_eventTag
= 0;
4518 phba
->link_events
= 0;
4519 phba
->pport
->fc_myDID
= 0;
4520 phba
->pport
->fc_prevDID
= 0;
4522 spin_lock_irq(&phba
->hbalock
);
4523 psli
->sli_flag
&= ~(LPFC_PROCESS_LA
);
4524 phba
->fcf
.fcf_flag
= 0;
4525 spin_unlock_irq(&phba
->hbalock
);
4527 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4528 if (phba
->hba_flag
& HBA_FW_DUMP_OP
) {
4529 phba
->hba_flag
&= ~HBA_FW_DUMP_OP
;
4533 /* Now physically reset the device */
4534 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
4535 "0389 Performing PCI function reset!\n");
4537 /* Turn off parity checking and serr during the physical reset */
4538 if (pci_read_config_word(phba
->pcidev
, PCI_COMMAND
, &cfg_value
)) {
4539 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
4540 "3205 PCI read Config failed\n");
4544 pci_write_config_word(phba
->pcidev
, PCI_COMMAND
, (cfg_value
&
4545 ~(PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
)));
4547 /* Perform FCoE PCI function reset before freeing queue memory */
4548 rc
= lpfc_pci_function_reset(phba
);
4550 /* Restore PCI cmd register */
4551 pci_write_config_word(phba
->pcidev
, PCI_COMMAND
, cfg_value
);
4557 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
4558 * @phba: Pointer to HBA context object.
4560 * This function is called in the SLI initialization code path to
4561 * restart the HBA. The caller is not required to hold any lock.
4562 * This function writes MBX_RESTART mailbox command to the SLIM and
4563 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
4564 * function to free any pending commands. The function enables
4565 * POST only during the first initialization. The function returns zero.
4566 * The function does not guarantee completion of MBX_RESTART mailbox
4567 * command before the return of this function.
4570 lpfc_sli_brdrestart_s3(struct lpfc_hba
*phba
)
4573 struct lpfc_sli
*psli
;
4574 volatile uint32_t word0
;
4575 void __iomem
*to_slim
;
4576 uint32_t hba_aer_enabled
;
4578 spin_lock_irq(&phba
->hbalock
);
4580 /* Take PCIe device Advanced Error Reporting (AER) state */
4581 hba_aer_enabled
= phba
->hba_flag
& HBA_AER_ENABLED
;
4586 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4587 "0337 Restart HBA Data: x%x x%x\n",
4588 (phba
->pport
) ? phba
->pport
->port_state
: 0,
4592 mb
= (MAILBOX_t
*) &word0
;
4593 mb
->mbxCommand
= MBX_RESTART
;
4596 lpfc_reset_barrier(phba
);
4598 to_slim
= phba
->MBslimaddr
;
4599 writel(*(uint32_t *) mb
, to_slim
);
4600 readl(to_slim
); /* flush */
4602 /* Only skip post after fc_ffinit is completed */
4603 if (phba
->pport
&& phba
->pport
->port_state
)
4604 word0
= 1; /* This is really setting up word1 */
4606 word0
= 0; /* This is really setting up word1 */
4607 to_slim
= phba
->MBslimaddr
+ sizeof (uint32_t);
4608 writel(*(uint32_t *) mb
, to_slim
);
4609 readl(to_slim
); /* flush */
4611 lpfc_sli_brdreset(phba
);
4613 phba
->pport
->stopped
= 0;
4614 phba
->link_state
= LPFC_INIT_START
;
4616 spin_unlock_irq(&phba
->hbalock
);
4618 memset(&psli
->lnk_stat_offsets
, 0, sizeof(psli
->lnk_stat_offsets
));
4619 psli
->stats_start
= ktime_get_seconds();
4621 /* Give the INITFF and Post time to settle. */
4624 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4625 if (hba_aer_enabled
)
4626 pci_disable_pcie_error_reporting(phba
->pcidev
);
4628 lpfc_hba_down_post(phba
);
4634 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
4635 * @phba: Pointer to HBA context object.
4637 * This function is called in the SLI initialization code path to restart
4638 * a SLI4 HBA. The caller is not required to hold any lock.
4639 * At the end of the function, it calls lpfc_hba_down_post function to
4640 * free any pending commands.
4643 lpfc_sli_brdrestart_s4(struct lpfc_hba
*phba
)
4645 struct lpfc_sli
*psli
= &phba
->sli
;
4646 uint32_t hba_aer_enabled
;
4650 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4651 "0296 Restart HBA Data: x%x x%x\n",
4652 phba
->pport
->port_state
, psli
->sli_flag
);
4654 /* Take PCIe device Advanced Error Reporting (AER) state */
4655 hba_aer_enabled
= phba
->hba_flag
& HBA_AER_ENABLED
;
4657 rc
= lpfc_sli4_brdreset(phba
);
4659 phba
->link_state
= LPFC_HBA_ERROR
;
4660 goto hba_down_queue
;
4663 spin_lock_irq(&phba
->hbalock
);
4664 phba
->pport
->stopped
= 0;
4665 phba
->link_state
= LPFC_INIT_START
;
4667 spin_unlock_irq(&phba
->hbalock
);
4669 memset(&psli
->lnk_stat_offsets
, 0, sizeof(psli
->lnk_stat_offsets
));
4670 psli
->stats_start
= ktime_get_seconds();
4672 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4673 if (hba_aer_enabled
)
4674 pci_disable_pcie_error_reporting(phba
->pcidev
);
4677 lpfc_hba_down_post(phba
);
4678 lpfc_sli4_queue_destroy(phba
);
/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
 * API jump table function pointer in the lpfc_hba struct.
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
    return phba->lpfc_sli_brdrestart(phba);
}
/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after an HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * the HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
 * iterations, the function will restart the HBA again. The function returns
 * zero if the HBA restarted successfully, else it returns a negative error
 * code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
    uint32_t status, i = 0;

    /* Read the HBA Host Status Register */
    if (lpfc_readl(phba->HSregaddr, &status))
        return -EIO;

    /* Check status register to see what current state is */
    i = 0;
    while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

        /* Check every 10ms for 10 retries, then every 100ms for 90
         * retries, then every 1 sec for 50 retries for a total of
         * ~60 seconds before reset the board again and check every
         * 1 sec for 50 retries.  The up to 60 seconds before the
         * board ready is required by the Falcon FIPS zeroization
         * complete, and any reset of the board in between shall cause
         * restart of zeroization, further delaying the board ready.
         */
        if (i++ >= 200) {
            /* Adapter failed to init, timeout, status reg
               <status> */
            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                    "0436 Adapter failed to init, "
                    "timeout, status reg x%x, "
                    "FW Data: A8 x%x AC x%x\n", status,
                    readl(phba->MBslimaddr + 0xa8),
                    readl(phba->MBslimaddr + 0xac));
            phba->link_state = LPFC_HBA_ERROR;
            return -ETIMEDOUT;
        }

        /* Check to see if any errors occurred during init */
        if (status & HS_FFERM) {
            /* ERROR: During chipset initialization */
            /* Adapter failed to init, chipset, status reg
               <status> */
            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                    "0437 Adapter failed to init, "
                    "chipset, status reg x%x, "
                    "FW Data: A8 x%x AC x%x\n", status,
                    readl(phba->MBslimaddr + 0xa8),
                    readl(phba->MBslimaddr + 0xac));
            phba->link_state = LPFC_HBA_ERROR;
            return -EIO;
        }

        if (i <= 10)
            msleep(10);
        else if (i <= 100)
            msleep(100);
        else
            msleep(1000);

        if (i == 150) {
            /* Do post */
            phba->pport->port_state = LPFC_VPORT_UNKNOWN;
            lpfc_sli_brdrestart(phba);
        }
        /* Read the HBA Host Status Register */
        if (lpfc_readl(phba->HSregaddr, &status))
            return -EIO;
    }

    /* Check to see if any errors occurred during init */
    if (status & HS_FFERM) {
        /* ERROR: During chipset initialization */
        /* Adapter failed to init, chipset, status reg <status> */
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "0438 Adapter failed to init, chipset, "
                "status reg x%x, "
                "FW Data: A8 x%x AC x%x\n", status,
                readl(phba->MBslimaddr + 0xa8),
                readl(phba->MBslimaddr + 0xac));
        phba->link_state = LPFC_HBA_ERROR;
        return -EIO;
    }

    /* Clear all interrupt enable conditions */
    writel(0, phba->HCregaddr);
    readl(phba->HCregaddr); /* flush */

    /* setup host attn register */
    writel(0xffffffff, phba->HAregaddr);
    readl(phba->HAregaddr); /* flush */
    return 0;
}
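/*
 * Worked timing example for the polling loop above, assuming the back-off
 * schedule described in the in-function comment: 10 polls at 10 ms plus
 * 90 polls at 100 ms plus 50 polls at 1 s is roughly 0.1 + 9 + 50, i.e.
 * about 59-60 seconds, before the board is restarted once and polled for
 * up to 50 more 1-second intervals.  The long first window exists so that
 * Falcon FIPS zeroization can finish without being interrupted by a reset.
 */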
/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * This function calculates and returns the number of HBQs required to be
 * configured.
 **/
int
lpfc_sli_hbq_count(void)
{
    return ARRAY_SIZE(lpfc_hbq_defs);
}

/**
 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
 *
 * This function adds the number of hbq entries in every HBQ to get
 * the total number of hbq entries required for the HBA and returns
 * the total count.
 **/
static int
lpfc_sli_hbq_entry_count(void)
{
    int hbq_count = lpfc_sli_hbq_count();
    int count = 0;
    int i;

    for (i = 0; i < hbq_count; ++i)
        count += lpfc_hbq_defs[i]->entry_count;
    return count;
}

/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * This function calculates the amount of memory required for all hbq entries
 * to be configured and returns the total memory required.
 **/
int
lpfc_sli_hbq_size(void)
{
    return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
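/*
 * Worked example with hypothetical numbers: if lpfc_hbq_defs[] described two
 * HBQs with entry_count 256 and 128, lpfc_sli_hbq_entry_count() would return
 * 384 and lpfc_sli_hbq_size() would return 384 * sizeof(struct lpfc_hbq_entry)
 * bytes to reserve.  The real counts always come from lpfc_hbq_defs[].
 */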
4840 * lpfc_sli_hbq_setup - configure and initialize HBQs
4841 * @phba: Pointer to HBA context object.
4843 * This function is called during the SLI initialization to configure
4844 * all the HBQs and post buffers to the HBQ. The caller is not
4845 * required to hold any locks. This function will return zero if successful
4846 * else it will return negative error code.
4849 lpfc_sli_hbq_setup(struct lpfc_hba
*phba
)
4851 int hbq_count
= lpfc_sli_hbq_count();
4855 uint32_t hbq_entry_index
;
4857 /* Get a Mailbox buffer to setup mailbox
4858 * commands for HBA initialization
4860 pmb
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
4867 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
4868 phba
->link_state
= LPFC_INIT_MBX_CMDS
;
4869 phba
->hbq_in_use
= 1;
4871 hbq_entry_index
= 0;
4872 for (hbqno
= 0; hbqno
< hbq_count
; ++hbqno
) {
4873 phba
->hbqs
[hbqno
].next_hbqPutIdx
= 0;
4874 phba
->hbqs
[hbqno
].hbqPutIdx
= 0;
4875 phba
->hbqs
[hbqno
].local_hbqGetIdx
= 0;
4876 phba
->hbqs
[hbqno
].entry_count
=
4877 lpfc_hbq_defs
[hbqno
]->entry_count
;
4878 lpfc_config_hbq(phba
, hbqno
, lpfc_hbq_defs
[hbqno
],
4879 hbq_entry_index
, pmb
);
4880 hbq_entry_index
+= phba
->hbqs
[hbqno
].entry_count
;
4882 if (lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
) != MBX_SUCCESS
) {
4883 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
4884 mbxStatus <status>, ring <num> */
4886 lpfc_printf_log(phba
, KERN_ERR
,
4887 LOG_SLI
| LOG_VPORT
,
4888 "1805 Adapter failed to init. "
4889 "Data: x%x x%x x%x\n",
4891 pmbox
->mbxStatus
, hbqno
);
4893 phba
->link_state
= LPFC_HBA_ERROR
;
4894 mempool_free(pmb
, phba
->mbox_mem_pool
);
4898 phba
->hbq_count
= hbq_count
;
4900 mempool_free(pmb
, phba
->mbox_mem_pool
);
4902 /* Initially populate or replenish the HBQs */
4903 for (hbqno
= 0; hbqno
< hbq_count
; ++hbqno
)
4904 lpfc_sli_hbqbuf_init_hbqs(phba
, hbqno
);
/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful,
 * else it will return a negative error code.
 **/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
    phba->hbq_in_use = 1;

    /*
     * Specific case when the MDS diagnostics is enabled and supported.
     * The receive buffer count is truncated to manage the incoming
     * traffic.
     */
    if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
        phba->hbqs[LPFC_ELS_HBQ].entry_count =
            lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
    else
        phba->hbqs[LPFC_ELS_HBQ].entry_count =
            lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
    phba->hbq_count = 1;
    lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
    /* Initially populate or replenish the HBQs */
    return 0;
}
4939 * lpfc_sli_config_port - Issue config port mailbox command
4940 * @phba: Pointer to HBA context object.
4941 * @sli_mode: sli mode - 2/3
4943 * This function is called by the sli initialization code path
4944 * to issue config_port mailbox command. This function restarts the
4945 * HBA firmware and issues a config_port mailbox command to configure
4946 * the SLI interface in the sli mode specified by sli_mode
4947 * variable. The caller is not required to hold any locks.
4948 * The function returns 0 if successful, else returns negative error
4952 lpfc_sli_config_port(struct lpfc_hba
*phba
, int sli_mode
)
4955 uint32_t resetcount
= 0, rc
= 0, done
= 0;
4957 pmb
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
4959 phba
->link_state
= LPFC_HBA_ERROR
;
4963 phba
->sli_rev
= sli_mode
;
4964 while (resetcount
< 2 && !done
) {
4965 spin_lock_irq(&phba
->hbalock
);
4966 phba
->sli
.sli_flag
|= LPFC_SLI_MBOX_ACTIVE
;
4967 spin_unlock_irq(&phba
->hbalock
);
4968 phba
->pport
->port_state
= LPFC_VPORT_UNKNOWN
;
4969 lpfc_sli_brdrestart(phba
);
4970 rc
= lpfc_sli_chipset_init(phba
);
4974 spin_lock_irq(&phba
->hbalock
);
4975 phba
->sli
.sli_flag
&= ~LPFC_SLI_MBOX_ACTIVE
;
4976 spin_unlock_irq(&phba
->hbalock
);
4979 /* Call pre CONFIG_PORT mailbox command initialization. A
4980 * value of 0 means the call was successful. Any other
4981 * nonzero value is a failure, but if ERESTART is returned,
4982 * the driver may reset the HBA and try again.
4984 rc
= lpfc_config_port_prep(phba
);
4985 if (rc
== -ERESTART
) {
4986 phba
->link_state
= LPFC_LINK_UNKNOWN
;
4991 phba
->link_state
= LPFC_INIT_MBX_CMDS
;
4992 lpfc_config_port(phba
, pmb
);
4993 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
);
4994 phba
->sli3_options
&= ~(LPFC_SLI3_NPIV_ENABLED
|
4995 LPFC_SLI3_HBQ_ENABLED
|
4996 LPFC_SLI3_CRP_ENABLED
|
4997 LPFC_SLI3_DSS_ENABLED
);
4998 if (rc
!= MBX_SUCCESS
) {
4999 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
5000 "0442 Adapter failed to init, mbxCmd x%x "
5001 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5002 pmb
->u
.mb
.mbxCommand
, pmb
->u
.mb
.mbxStatus
, 0);
5003 spin_lock_irq(&phba
->hbalock
);
5004 phba
->sli
.sli_flag
&= ~LPFC_SLI_ACTIVE
;
5005 spin_unlock_irq(&phba
->hbalock
);
5008 /* Allow asynchronous mailbox command to go through */
5009 spin_lock_irq(&phba
->hbalock
);
5010 phba
->sli
.sli_flag
&= ~LPFC_SLI_ASYNC_MBX_BLK
;
5011 spin_unlock_irq(&phba
->hbalock
);
5014 if ((pmb
->u
.mb
.un
.varCfgPort
.casabt
== 1) &&
5015 (pmb
->u
.mb
.un
.varCfgPort
.gasabt
== 0))
5016 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
5017 "3110 Port did not grant ASABT\n");
5022 goto do_prep_failed
;
5024 if (pmb
->u
.mb
.un
.varCfgPort
.sli_mode
== 3) {
5025 if (!pmb
->u
.mb
.un
.varCfgPort
.cMA
) {
5027 goto do_prep_failed
;
5029 if (phba
->max_vpi
&& pmb
->u
.mb
.un
.varCfgPort
.gmv
) {
5030 phba
->sli3_options
|= LPFC_SLI3_NPIV_ENABLED
;
5031 phba
->max_vpi
= pmb
->u
.mb
.un
.varCfgPort
.max_vpi
;
5032 phba
->max_vports
= (phba
->max_vpi
> phba
->max_vports
) ?
5033 phba
->max_vpi
: phba
->max_vports
;
5037 phba
->fips_level
= 0;
5038 phba
->fips_spec_rev
= 0;
5039 if (pmb
->u
.mb
.un
.varCfgPort
.gdss
) {
5040 phba
->sli3_options
|= LPFC_SLI3_DSS_ENABLED
;
5041 phba
->fips_level
= pmb
->u
.mb
.un
.varCfgPort
.fips_level
;
5042 phba
->fips_spec_rev
= pmb
->u
.mb
.un
.varCfgPort
.fips_rev
;
5043 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
5044 "2850 Security Crypto Active. FIPS x%d "
5046 phba
->fips_level
, phba
->fips_spec_rev
);
5048 if (pmb
->u
.mb
.un
.varCfgPort
.sec_err
) {
5049 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
5050 "2856 Config Port Security Crypto "
5052 pmb
->u
.mb
.un
.varCfgPort
.sec_err
);
5054 if (pmb
->u
.mb
.un
.varCfgPort
.gerbm
)
5055 phba
->sli3_options
|= LPFC_SLI3_HBQ_ENABLED
;
5056 if (pmb
->u
.mb
.un
.varCfgPort
.gcrp
)
5057 phba
->sli3_options
|= LPFC_SLI3_CRP_ENABLED
;
5059 phba
->hbq_get
= phba
->mbox
->us
.s3_pgp
.hbq_get
;
5060 phba
->port_gp
= phba
->mbox
->us
.s3_pgp
.port
;
5062 if (phba
->sli3_options
& LPFC_SLI3_BG_ENABLED
) {
5063 if (pmb
->u
.mb
.un
.varCfgPort
.gbg
== 0) {
5064 phba
->cfg_enable_bg
= 0;
5065 phba
->sli3_options
&= ~LPFC_SLI3_BG_ENABLED
;
5066 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
5067 "0443 Adapter did not grant "
5072 phba
->hbq_get
= NULL
;
5073 phba
->port_gp
= phba
->mbox
->us
.s2
.port
;
5077 mempool_free(pmb
, phba
->mbox_mem_pool
);
5083 * lpfc_sli_hba_setup - SLI initialization function
5084 * @phba: Pointer to HBA context object.
5086 * This function is the main SLI initialization function. This function
5087 * is called by the HBA initialization code, HBA reset code and HBA
5088 * error attention handler code. Caller is not required to hold any
5089 * locks. This function issues config_port mailbox command to configure
5090 * the SLI, setup iocb rings and HBQ rings. In the end the function
5091 * calls the config_port_post function to issue init_link mailbox
5092 * command and to start the discovery. The function will return zero
5093 * if successful, else it will return negative error code.
5096 lpfc_sli_hba_setup(struct lpfc_hba
*phba
)
5102 switch (phba
->cfg_sli_mode
) {
5104 if (phba
->cfg_enable_npiv
) {
5105 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
| LOG_VPORT
,
5106 "1824 NPIV enabled: Override sli_mode "
5107 "parameter (%d) to auto (0).\n",
5108 phba
->cfg_sli_mode
);
5117 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
| LOG_VPORT
,
5118 "1819 Unrecognized sli_mode parameter: %d.\n",
5119 phba
->cfg_sli_mode
);
5123 phba
->fcp_embed_io
= 0; /* SLI4 FC support only */
5125 rc
= lpfc_sli_config_port(phba
, mode
);
5127 if (rc
&& phba
->cfg_sli_mode
== 3)
5128 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
| LOG_VPORT
,
5129 "1820 Unable to select SLI-3. "
5130 "Not supported by adapter.\n");
5131 if (rc
&& mode
!= 2)
5132 rc
= lpfc_sli_config_port(phba
, 2);
5133 else if (rc
&& mode
== 2)
5134 rc
= lpfc_sli_config_port(phba
, 3);
5136 goto lpfc_sli_hba_setup_error
;
5138 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5139 if (phba
->cfg_aer_support
== 1 && !(phba
->hba_flag
& HBA_AER_ENABLED
)) {
5140 rc
= pci_enable_pcie_error_reporting(phba
->pcidev
);
5142 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
5143 "2709 This device supports "
5144 "Advanced Error Reporting (AER)\n");
5145 spin_lock_irq(&phba
->hbalock
);
5146 phba
->hba_flag
|= HBA_AER_ENABLED
;
5147 spin_unlock_irq(&phba
->hbalock
);
5149 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
5150 "2708 This device does not support "
5151 "Advanced Error Reporting (AER): %d\n",
5153 phba
->cfg_aer_support
= 0;
5157 if (phba
->sli_rev
== 3) {
5158 phba
->iocb_cmd_size
= SLI3_IOCB_CMD_SIZE
;
5159 phba
->iocb_rsp_size
= SLI3_IOCB_RSP_SIZE
;
5161 phba
->iocb_cmd_size
= SLI2_IOCB_CMD_SIZE
;
5162 phba
->iocb_rsp_size
= SLI2_IOCB_RSP_SIZE
;
5163 phba
->sli3_options
= 0;
5166 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
5167 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5168 phba
->sli_rev
, phba
->max_vpi
);
5169 rc
= lpfc_sli_ring_map(phba
);
5172 goto lpfc_sli_hba_setup_error
;
5174 /* Initialize VPIs. */
5175 if (phba
->sli_rev
== LPFC_SLI_REV3
) {
5177 * The VPI bitmask and physical ID array are allocated
5178 * and initialized once only - at driver load. A port
5179 * reset doesn't need to reinitialize this memory.
5181 if ((phba
->vpi_bmask
== NULL
) && (phba
->vpi_ids
== NULL
)) {
5182 longs
= (phba
->max_vpi
+ BITS_PER_LONG
) / BITS_PER_LONG
;
5183 phba
->vpi_bmask
= kcalloc(longs
,
5184 sizeof(unsigned long),
5186 if (!phba
->vpi_bmask
) {
5188 goto lpfc_sli_hba_setup_error
;
5191 phba
->vpi_ids
= kcalloc(phba
->max_vpi
+ 1,
5194 if (!phba
->vpi_ids
) {
5195 kfree(phba
->vpi_bmask
);
5197 goto lpfc_sli_hba_setup_error
;
5199 for (i
= 0; i
< phba
->max_vpi
; i
++)
5200 phba
->vpi_ids
[i
] = i
;
5205 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
5206 rc
= lpfc_sli_hbq_setup(phba
);
5208 goto lpfc_sli_hba_setup_error
;
5210 spin_lock_irq(&phba
->hbalock
);
5211 phba
->sli
.sli_flag
|= LPFC_PROCESS_LA
;
5212 spin_unlock_irq(&phba
->hbalock
);
5214 rc
= lpfc_config_port_post(phba
);
5216 goto lpfc_sli_hba_setup_error
;
5220 lpfc_sli_hba_setup_error
:
5221 phba
->link_state
= LPFC_HBA_ERROR
;
5222 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
5223 "0445 Firmware initialization failed\n");
5228 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5229 * @phba: Pointer to HBA context object.
5230 * @mboxq: mailbox pointer.
5231 * This function issue a dump mailbox command to read config region
5232 * 23 and parse the records in the region and populate driver
5236 lpfc_sli4_read_fcoe_params(struct lpfc_hba
*phba
)
5238 LPFC_MBOXQ_t
*mboxq
;
5239 struct lpfc_dmabuf
*mp
;
5240 struct lpfc_mqe
*mqe
;
5241 uint32_t data_length
;
5244 /* Program the default value of vlan_id and fc_map */
5245 phba
->valid_vlan
= 0;
5246 phba
->fc_map
[0] = LPFC_FCOE_FCF_MAP0
;
5247 phba
->fc_map
[1] = LPFC_FCOE_FCF_MAP1
;
5248 phba
->fc_map
[2] = LPFC_FCOE_FCF_MAP2
;
5250 mboxq
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5254 mqe
= &mboxq
->u
.mqe
;
5255 if (lpfc_sli4_dump_cfg_rg23(phba
, mboxq
)) {
5257 goto out_free_mboxq
;
5260 mp
= (struct lpfc_dmabuf
*)mboxq
->ctx_buf
;
5261 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
5263 lpfc_printf_log(phba
, KERN_INFO
, LOG_MBOX
| LOG_SLI
,
5264 "(%d):2571 Mailbox cmd x%x Status x%x "
5265 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5266 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5267 "CQ: x%x x%x x%x x%x\n",
5268 mboxq
->vport
? mboxq
->vport
->vpi
: 0,
5269 bf_get(lpfc_mqe_command
, mqe
),
5270 bf_get(lpfc_mqe_status
, mqe
),
5271 mqe
->un
.mb_words
[0], mqe
->un
.mb_words
[1],
5272 mqe
->un
.mb_words
[2], mqe
->un
.mb_words
[3],
5273 mqe
->un
.mb_words
[4], mqe
->un
.mb_words
[5],
5274 mqe
->un
.mb_words
[6], mqe
->un
.mb_words
[7],
5275 mqe
->un
.mb_words
[8], mqe
->un
.mb_words
[9],
5276 mqe
->un
.mb_words
[10], mqe
->un
.mb_words
[11],
5277 mqe
->un
.mb_words
[12], mqe
->un
.mb_words
[13],
5278 mqe
->un
.mb_words
[14], mqe
->un
.mb_words
[15],
5279 mqe
->un
.mb_words
[16], mqe
->un
.mb_words
[50],
5281 mboxq
->mcqe
.mcqe_tag0
, mboxq
->mcqe
.mcqe_tag1
,
5282 mboxq
->mcqe
.trailer
);
5285 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
5288 goto out_free_mboxq
;
5290 data_length
= mqe
->un
.mb_words
[5];
5291 if (data_length
> DMP_RGN23_SIZE
) {
5292 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
5295 goto out_free_mboxq
;
5298 lpfc_parse_fcoe_conf(phba
, mp
->virt
, data_length
);
5299 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
5304 mempool_free(mboxq
, phba
->mbox_mem_pool
);
5309 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5310 * @phba: pointer to lpfc hba data structure.
5311 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5312 * @vpd: pointer to the memory to hold resulting port vpd data.
5313 * @vpd_size: On input, the number of bytes allocated to @vpd.
5314 * On output, the number of data bytes in @vpd.
5316 * This routine executes a READ_REV SLI4 mailbox command. In
5317 * addition, this routine gets the port vpd data.
5321 * -ENOMEM - could not allocated memory.
5324 lpfc_sli4_read_rev(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
,
5325 uint8_t *vpd
, uint32_t *vpd_size
)
5329 struct lpfc_dmabuf
*dmabuf
;
5330 struct lpfc_mqe
*mqe
;
5332 dmabuf
= kzalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
5337 * Get a DMA buffer for the vpd data resulting from the READ_REV
5340 dma_size
= *vpd_size
;
5341 dmabuf
->virt
= dma_alloc_coherent(&phba
->pcidev
->dev
, dma_size
,
5342 &dmabuf
->phys
, GFP_KERNEL
);
5343 if (!dmabuf
->virt
) {
5349 * The SLI4 implementation of READ_REV conflicts at word1,
5350 * bits 31:16 and SLI4 adds vpd functionality not present
5351 * in SLI3. This code corrects the conflicts.
5353 lpfc_read_rev(phba
, mboxq
);
5354 mqe
= &mboxq
->u
.mqe
;
5355 mqe
->un
.read_rev
.vpd_paddr_high
= putPaddrHigh(dmabuf
->phys
);
5356 mqe
->un
.read_rev
.vpd_paddr_low
= putPaddrLow(dmabuf
->phys
);
5357 mqe
->un
.read_rev
.word1
&= 0x0000FFFF;
5358 bf_set(lpfc_mbx_rd_rev_vpd
, &mqe
->un
.read_rev
, 1);
5359 bf_set(lpfc_mbx_rd_rev_avail_len
, &mqe
->un
.read_rev
, dma_size
);
5361 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
5363 dma_free_coherent(&phba
->pcidev
->dev
, dma_size
,
5364 dmabuf
->virt
, dmabuf
->phys
);
5370 * The available vpd length cannot be bigger than the
5371 * DMA buffer passed to the port. Catch the less than
5372 * case and update the caller's size.
5374 if (mqe
->un
.read_rev
.avail_vpd_len
< *vpd_size
)
5375 *vpd_size
= mqe
->un
.read_rev
.avail_vpd_len
;
5377 memcpy(vpd
, dmabuf
->virt
, *vpd_size
);
5379 dma_free_coherent(&phba
->pcidev
->dev
, dma_size
,
5380 dmabuf
->virt
, dmabuf
->phys
);
5386 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5387 * @phba: pointer to lpfc hba data structure.
5389 * This routine retrieves SLI4 device physical port name this PCI function
5394 * otherwise - failed to retrieve controller attributes
5397 lpfc_sli4_get_ctl_attr(struct lpfc_hba
*phba
)
5399 LPFC_MBOXQ_t
*mboxq
;
5400 struct lpfc_mbx_get_cntl_attributes
*mbx_cntl_attr
;
5401 struct lpfc_controller_attribute
*cntl_attr
;
5402 void *virtaddr
= NULL
;
5403 uint32_t alloclen
, reqlen
;
5404 uint32_t shdr_status
, shdr_add_status
;
5405 union lpfc_sli4_cfg_shdr
*shdr
;
5408 mboxq
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5412 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5413 reqlen
= sizeof(struct lpfc_mbx_get_cntl_attributes
);
5414 alloclen
= lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_COMMON
,
5415 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES
, reqlen
,
5416 LPFC_SLI4_MBX_NEMBED
);
5418 if (alloclen
< reqlen
) {
5419 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
5420 "3084 Allocated DMA memory size (%d) is "
5421 "less than the requested DMA memory size "
5422 "(%d)\n", alloclen
, reqlen
);
5424 goto out_free_mboxq
;
5426 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
5427 virtaddr
= mboxq
->sge_array
->addr
[0];
5428 mbx_cntl_attr
= (struct lpfc_mbx_get_cntl_attributes
*)virtaddr
;
5429 shdr
= &mbx_cntl_attr
->cfg_shdr
;
5430 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
5431 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
5432 if (shdr_status
|| shdr_add_status
|| rc
) {
5433 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
5434 "3085 Mailbox x%x (x%x/x%x) failed, "
5435 "rc:x%x, status:x%x, add_status:x%x\n",
5436 bf_get(lpfc_mqe_command
, &mboxq
->u
.mqe
),
5437 lpfc_sli_config_mbox_subsys_get(phba
, mboxq
),
5438 lpfc_sli_config_mbox_opcode_get(phba
, mboxq
),
5439 rc
, shdr_status
, shdr_add_status
);
5441 goto out_free_mboxq
;
5444 cntl_attr
= &mbx_cntl_attr
->cntl_attr
;
5445 phba
->sli4_hba
.lnk_info
.lnk_dv
= LPFC_LNK_DAT_VAL
;
5446 phba
->sli4_hba
.lnk_info
.lnk_tp
=
5447 bf_get(lpfc_cntl_attr_lnk_type
, cntl_attr
);
5448 phba
->sli4_hba
.lnk_info
.lnk_no
=
5449 bf_get(lpfc_cntl_attr_lnk_numb
, cntl_attr
);
5451 memset(phba
->BIOSVersion
, 0, sizeof(phba
->BIOSVersion
));
5452 strlcat(phba
->BIOSVersion
, (char *)cntl_attr
->bios_ver_str
,
5453 sizeof(phba
->BIOSVersion
));
5455 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
5456 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5457 phba
->sli4_hba
.lnk_info
.lnk_tp
,
5458 phba
->sli4_hba
.lnk_info
.lnk_no
,
5461 if (rc
!= MBX_TIMEOUT
) {
5462 if (bf_get(lpfc_mqe_command
, &mboxq
->u
.mqe
) == MBX_SLI4_CONFIG
)
5463 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
5465 mempool_free(mboxq
, phba
->mbox_mem_pool
);
5471 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
5472 * @phba: pointer to lpfc hba data structure.
5474 * This routine retrieves SLI4 device physical port name this PCI function
5479 * otherwise - failed to retrieve physical port name
5482 lpfc_sli4_retrieve_pport_name(struct lpfc_hba
*phba
)
5484 LPFC_MBOXQ_t
*mboxq
;
5485 struct lpfc_mbx_get_port_name
*get_port_name
;
5486 uint32_t shdr_status
, shdr_add_status
;
5487 union lpfc_sli4_cfg_shdr
*shdr
;
5488 char cport_name
= 0;
5491 /* We assume nothing at this point */
5492 phba
->sli4_hba
.lnk_info
.lnk_dv
= LPFC_LNK_DAT_INVAL
;
5493 phba
->sli4_hba
.pport_name_sta
= LPFC_SLI4_PPNAME_NON
;
5495 mboxq
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5498 /* obtain link type and link number via READ_CONFIG */
5499 phba
->sli4_hba
.lnk_info
.lnk_dv
= LPFC_LNK_DAT_INVAL
;
5500 lpfc_sli4_read_config(phba
);
5501 if (phba
->sli4_hba
.lnk_info
.lnk_dv
== LPFC_LNK_DAT_VAL
)
5502 goto retrieve_ppname
;
5504 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5505 rc
= lpfc_sli4_get_ctl_attr(phba
);
5507 goto out_free_mboxq
;
5510 lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_COMMON
,
5511 LPFC_MBOX_OPCODE_GET_PORT_NAME
,
5512 sizeof(struct lpfc_mbx_get_port_name
) -
5513 sizeof(struct lpfc_sli4_cfg_mhdr
),
5514 LPFC_SLI4_MBX_EMBED
);
5515 get_port_name
= &mboxq
->u
.mqe
.un
.get_port_name
;
5516 shdr
= (union lpfc_sli4_cfg_shdr
*)&get_port_name
->header
.cfg_shdr
;
5517 bf_set(lpfc_mbox_hdr_version
, &shdr
->request
, LPFC_OPCODE_VERSION_1
);
5518 bf_set(lpfc_mbx_get_port_name_lnk_type
, &get_port_name
->u
.request
,
5519 phba
->sli4_hba
.lnk_info
.lnk_tp
);
5520 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
5521 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
5522 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
5523 if (shdr_status
|| shdr_add_status
|| rc
) {
5524 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
5525 "3087 Mailbox x%x (x%x/x%x) failed: "
5526 "rc:x%x, status:x%x, add_status:x%x\n",
5527 bf_get(lpfc_mqe_command
, &mboxq
->u
.mqe
),
5528 lpfc_sli_config_mbox_subsys_get(phba
, mboxq
),
5529 lpfc_sli_config_mbox_opcode_get(phba
, mboxq
),
5530 rc
, shdr_status
, shdr_add_status
);
5532 goto out_free_mboxq
;
5534 switch (phba
->sli4_hba
.lnk_info
.lnk_no
) {
5535 case LPFC_LINK_NUMBER_0
:
5536 cport_name
= bf_get(lpfc_mbx_get_port_name_name0
,
5537 &get_port_name
->u
.response
);
5538 phba
->sli4_hba
.pport_name_sta
= LPFC_SLI4_PPNAME_GET
;
5540 case LPFC_LINK_NUMBER_1
:
5541 cport_name
= bf_get(lpfc_mbx_get_port_name_name1
,
5542 &get_port_name
->u
.response
);
5543 phba
->sli4_hba
.pport_name_sta
= LPFC_SLI4_PPNAME_GET
;
5545 case LPFC_LINK_NUMBER_2
:
5546 cport_name
= bf_get(lpfc_mbx_get_port_name_name2
,
5547 &get_port_name
->u
.response
);
5548 phba
->sli4_hba
.pport_name_sta
= LPFC_SLI4_PPNAME_GET
;
5550 case LPFC_LINK_NUMBER_3
:
5551 cport_name
= bf_get(lpfc_mbx_get_port_name_name3
,
5552 &get_port_name
->u
.response
);
5553 phba
->sli4_hba
.pport_name_sta
= LPFC_SLI4_PPNAME_GET
;
5559 if (phba
->sli4_hba
.pport_name_sta
== LPFC_SLI4_PPNAME_GET
) {
5560 phba
->Port
[0] = cport_name
;
5561 phba
->Port
[1] = '\0';
5562 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
5563 "3091 SLI get port name: %s\n", phba
->Port
);
5567 if (rc
!= MBX_TIMEOUT
) {
5568 if (bf_get(lpfc_mqe_command
, &mboxq
->u
.mqe
) == MBX_SLI4_CONFIG
)
5569 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
5571 mempool_free(mboxq
, phba
->mbox_mem_pool
);
/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
    int qidx;
    struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
    struct lpfc_sli4_hdw_queue *qp;
    struct lpfc_queue *eq;

    sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
    sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
    if (sli4_hba->nvmels_cq)
        sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
                       LPFC_QUEUE_REARM);

    if (sli4_hba->hdwq) {
        /* Loop thru all Hardware Queues */
        for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
            qp = &sli4_hba->hdwq[qidx];
            /* ARM the corresponding CQ */
            sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
                           LPFC_QUEUE_REARM);
        }

        /* Loop thru all IRQ vectors */
        for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
            eq = sli4_hba->hba_eq_hdl[qidx].eq;
            /* ARM the corresponding EQ */
            sli4_hba->sli4_write_eq_db(phba, eq,
                           0, LPFC_QUEUE_REARM);
        }
    }

    if (phba->nvmet_support) {
        for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
            sli4_hba->sli4_write_cq_db(phba,
                sli4_hba->nvmet_cqset[qidx], 0,
                LPFC_QUEUE_REARM);
        }
    }
}
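/*
 * Illustrative note: re-arming a queue is just a doorbell write reporting
 * zero consumed entries with LPFC_QUEUE_REARM set, dispatched through the
 * same sli4_write_cq_db/sli4_write_eq_db jump-table pointers used in the
 * fast path, e.g. (sketch only, some_cq stands for any CQ armed above):
 *
 *    sli4_hba->sli4_write_cq_db(phba, some_cq, 0, LPFC_QUEUE_REARM);
 */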
/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful.  Nonzero otherwise.
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
                   uint16_t *extnt_count, uint16_t *extnt_size)
{
    int rc = 0;
    uint32_t length;
    uint32_t mbox_tmo;
    struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
    LPFC_MBOXQ_t *mbox;

    mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;

    /* Find out how many extents are available for this resource type */
    length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
             LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
             length, LPFC_SLI4_MBX_EMBED);

    /* Send an extents count of 0 - the GET doesn't use it. */
    rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
                    LPFC_SLI4_MBX_EMBED);
    if (unlikely(rc)) {
        rc = -EIO;
        goto err_exit;
    }

    if (!phba->sli4_hba.intr_enable)
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
    else {
        mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
        rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
    }
    if (unlikely(rc)) {
        rc = -EIO;
        goto err_exit;
    }

    rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
    if (bf_get(lpfc_mbox_hdr_status,
           &rsrc_info->header.cfg_shdr.response)) {
        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
                "2930 Failed to get resource extents "
                "Status 0x%x Add'l Status 0x%x\n",
                bf_get(lpfc_mbox_hdr_status,
                       &rsrc_info->header.cfg_shdr.response),
                bf_get(lpfc_mbox_hdr_add_status,
                       &rsrc_info->header.cfg_shdr.response));
        rc = -EIO;
        goto err_exit;
    }

    *extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
                  &rsrc_info->u.rsp);
    *extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
                 &rsrc_info->u.rsp);

    lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
            "3162 Retrieved extents type-%d from port: count:%d, "
            "size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
    mempool_free(mbox, phba->mbox_mem_pool);
    return rc;
}
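/*
 * Illustrative caller sketch (hypothetical local variables): a caller that
 * wants to know how many RPI extents the port offers would do something like
 *
 *    uint16_t cnt, size;
 *    rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
 *                                        &cnt, &size);
 *
 * and, on success, cnt * size gives the total number of RPI ids the port can
 * provide.  lpfc_sli4_chk_avail_extnt_rsrc() below uses the same pattern.
 */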
/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function reads the current available extents from the port and checks
 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   -Error: error indicates problem.
 *    1: Extent count or size has changed.
 *    0: No changes.
 **/
static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
{
    uint16_t curr_ext_cnt, rsrc_ext_cnt;
    uint16_t size_diff, rsrc_ext_size;
    int rc = 0;
    struct lpfc_rsrc_blks *rsrc_entry;
    struct list_head *rsrc_blk_list = NULL;

    size_diff = 0;
    curr_ext_cnt = 0;
    rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
                        &rsrc_ext_cnt,
                        &rsrc_ext_size);
    if (unlikely(rc))
        return -EIO;

    switch (type) {
    case LPFC_RSC_TYPE_FCOE_RPI:
        rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
        break;
    case LPFC_RSC_TYPE_FCOE_VPI:
        rsrc_blk_list = &phba->lpfc_vpi_blk_list;
        break;
    case LPFC_RSC_TYPE_FCOE_XRI:
        rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
        break;
    case LPFC_RSC_TYPE_FCOE_VFI:
        rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
        break;
    default:
        break;
    }

    list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
        curr_ext_cnt++;
        if (rsrc_entry->rsrc_size != rsrc_ext_size)
            size_diff++;
    }

    if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
        rc = 1;

    return rc;
}
/**
 * lpfc_sli4_cfg_post_extnts - Post an extent allocation request
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 * @mbox: pointer to the caller's allocated mailbox structure.
 *
 * This function executes the extents allocation request.  It also
 * takes care of the amount of memory needed to allocate or get the
 * allocated extents. It is the caller's responsibility to evaluate
 * the response.
 *
 * Returns:
 *   -Error: Error value describes the condition found.
 *   0: if successful
 **/
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
              uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
    int rc = 0;
    uint32_t req_len;
    uint32_t emb_len;
    uint32_t alloc_len, mbox_tmo;

    /* Calculate the total requested length of the dma memory */
    req_len = extnt_cnt * sizeof(uint16_t);

    /*
     * Calculate the size of an embedded mailbox.  The uint32_t
     * accounts for extents-specific word.
     */
    emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
          sizeof(uint32_t);

    /*
     * Presume the allocation and response will fit into an embedded
     * mailbox.  If not true, reconfigure to a non-embedded mailbox.
     */
    *emb = LPFC_SLI4_MBX_EMBED;
    if (req_len > emb_len) {
        req_len = extnt_cnt * sizeof(uint16_t) +
              sizeof(union lpfc_sli4_cfg_shdr) +
              sizeof(uint32_t);
        *emb = LPFC_SLI4_MBX_NEMBED;
    }

    alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
                     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
                     req_len, *emb);
    if (alloc_len < req_len) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2982 Allocated DMA memory size (x%x) is "
                "less than the requested DMA memory "
                "size (x%x)\n", alloc_len, req_len);
        return -ENOMEM;
    }
    rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
    if (unlikely(rc))
        return -EIO;

    if (!phba->sli4_hba.intr_enable)
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
    else {
        mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
        rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
    }

    if (unlikely(rc))
        rc = -EIO;
    return rc;
}
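/*
 * Worked example for the sizing logic above (hypothetical numbers): with
 * extnt_cnt == 8 the response needs 8 * sizeof(uint16_t) == 16 bytes, which
 * fits the embedded mailbox payload, so *emb stays LPFC_SLI4_MBX_EMBED.  A
 * request for several hundred extents would exceed emb_len and be reissued
 * as LPFC_SLI4_MBX_NEMBED, with the cfg_shdr and the extents-specific word
 * added back into req_len exactly as computed above.
 */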
/**
 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to allocate.
 *
 * This function allocates the number of elements for the specified
 * resource type.
 **/
static int
lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	bool emb = false;
	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
	uint16_t rsrc_id, rsrc_start, j, k;
	uint16_t *ids;
	int i, rc;
	unsigned long longs;
	unsigned long *bmask;
	struct lpfc_rsrc_blks *rsrc_blks;
	LPFC_MBOXQ_t *mbox;
	uint32_t length;
	struct lpfc_id_range *id_array = NULL;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	struct list_head *ext_blk_list;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_cnt, &rsrc_size);
	if (unlikely(rc))
		return -EIO;

	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"3009 No available Resource Extents "
				"for resource type 0x%x: Count: 0x%x, "
				"Size 0x%x\n", type, rsrc_cnt, rsrc_size);
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
			"2903 Post resource extents type-0x%x: "
			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located.  Then get local pointers
	 * to the response data.  The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		id_array = &rsrc_ext->u.rsp.id[0];
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
		id_array = &n_rsrc->id;
	}

	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	rsrc_id_cnt = rsrc_cnt * rsrc_size;

	/*
	 * Based on the resource size and count, correct the base and max
	 * resource values.
	 */
	length = sizeof(struct lpfc_rsrc_blks);
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t), GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			kfree(phba->sli4_hba.rpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/*
		 * The next_rpi was initialized with the maximum available
		 * count but the port may allocate a smaller number.  Catch
		 * that case and update the next_rpi.
		 */
		phba->sli4_hba.next_rpi = rsrc_id_cnt;

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.rpi_bmask;
		ids = phba->sli4_hba.rpi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			kfree(phba->vpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->vpi_bmask;
		ids = phba->vpi_ids;
		ext_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t), GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			kfree(phba->sli4_hba.xri_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.xri_bmask;
		ids = phba->sli4_hba.xri_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t), GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			kfree(phba->sli4_hba.vfi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.vfi_bmask;
		ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported Opcode.  Fail call. */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		goto err_exit;
	}

	/*
	 * Complete initializing the extent configuration with the
	 * allocated ids assigned to this function.  The bitmask serves
	 * as an index into the array and manages the available ids.  The
	 * array just stores the ids communicated to the port via the wqes.
	 */
	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
		if ((i % 2) == 0)
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
					 &id_array[k]);
		else
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
					 &id_array[k]);

		rsrc_blks = kzalloc(length, GFP_KERNEL);
		if (unlikely(!rsrc_blks)) {
			rc = -ENOMEM;
			kfree(bmask);
			kfree(ids);
			goto err_exit;
		}
		rsrc_blks->rsrc_start = rsrc_id;
		rsrc_blks->rsrc_size = rsrc_size;
		list_add_tail(&rsrc_blks->list, ext_blk_list);
		rsrc_start = rsrc_id;
		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
			phba->sli4_hba.io_xri_start = rsrc_start +
				lpfc_sli4_get_iocb_cnt(phba);
		}

		while (rsrc_id < (rsrc_start + rsrc_size)) {
			ids[j] = rsrc_id;
			rsrc_id++;
			j++;
		}
		/* Entire word processed.  Get next word.*/
		if ((i % 2) == 1)
			k++;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
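/*
 * Illustrative sketch (not driver code) of the bitmask sizing done in
 * lpfc_sli4_alloc_extent().  The counts are hypothetical, chosen only
 * to make the arithmetic concrete:
 *
 *	rsrc_cnt    = 4 extents returned by the port
 *	rsrc_size   = 64 ids per extent
 *	rsrc_id_cnt = 4 * 64 = 256 ids to manage
 *	longs       = (256 + BITS_PER_LONG - 1) / BITS_PER_LONG
 *		    = 4 longs on a 64-bit host (8 on a 32-bit host)
 *
 * One bit per id records whether that id is in use, while the parallel
 * *_ids[] array holds the actual id value the port expects on the wire.
 */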
/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type.
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range.  It
 * is the caller's responsibility to release all kernel memory resources.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends
	 * the resource type.  All extents of this type are released by the
	 * port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				       &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Release kernel memory resources for the specific type. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/* RPI bitmask and physical id array are cleaned up earlier. */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
void
lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
		  uint32_t feature)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_feature) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
			 LPFC_SLI4_MBX_EMBED);

	switch (feature) {
	case LPFC_SET_UE_RECOVERY:
		bf_set(lpfc_mbx_set_feature_UER,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	case LPFC_SET_MDS_DIAGS:
		bf_set(lpfc_mbx_set_feature_mds,
		       &mbox->u.mqe.un.set_feature, 1);
		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	case LPFC_SET_DUAL_DUMP:
		bf_set(lpfc_mbx_set_feature_dd,
		       &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
		bf_set(lpfc_mbx_set_feature_ddquery,
		       &mbox->u.mqe.un.set_feature, 0);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
		mbox->u.mqe.un.set_feature.param_len = 4;
		break;
	default:
		break;
	}

	return;
}
/**
 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
 * @phba: Pointer to HBA context object.
 *
 * Disable FW logging into host memory on the adapter. To
 * be done before reading logs from the host memory.
 **/
void
lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Disable FW logging to host memory */
	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);

	/* Wait 10ms for firmware to stop using DMA buffer */
	usleep_range(10 * 1000, 20 * 1000);
}
/**
 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
 * @phba: Pointer to HBA context object.
 *
 * This function is called to free memory allocated for RAS FW logging
 * support in the driver.
 **/
void
lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf, *next;

	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
		list_for_each_entry_safe(dmabuf, next,
					 &ras_fwlog->fwlog_buff_list,
					 list) {
			list_del(&dmabuf->list);
			dma_free_coherent(&phba->pcidev->dev,
					  LPFC_RAS_MAX_ENTRY_SIZE,
					  dmabuf->virt, dmabuf->phys);
			kfree(dmabuf);
		}
	}

	if (ras_fwlog->lwpd.virt) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(uint32_t) * 2,
				  ras_fwlog->lwpd.virt,
				  ras_fwlog->lwpd.phys);
		ras_fwlog->lwpd.virt = NULL;
	}

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Count of buffers to be created.
 *
 * This routine allocates DMA memory for the Log Write Position Data (LWPD)
 * and for the buffers posted to the adapter to receive FW log entries.
 * Buffer count is calculated based on module param ras_fwlog_buffsize.
 * Size of each buffer posted to FW is 64K.
 **/
static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
			uint32_t fwlog_buff_count)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0, i = 0;

	/* Initialize List */
	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);

	/* Allocate memory for the LWPD */
	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
						  sizeof(uint32_t) * 2,
						  &ras_fwlog->lwpd.phys,
						  GFP_KERNEL);
	if (!ras_fwlog->lwpd.virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6185 LWPD Memory Alloc Failed\n");
		return -ENOMEM;
	}

	ras_fwlog->fw_buffcount = fwlog_buff_count;
	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6186 Memory Alloc failed FW logging");
			goto free_mem;
		}

		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  LPFC_RAS_MAX_ENTRY_SIZE,
						  &dmabuf->phys, GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6187 DMA Alloc Failed FW logging");
			goto free_mem;
		}
		dmabuf->buffer_tag = i;
		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
	}

	return rc;

free_mem:
	lpfc_sli4_ras_dma_free(phba);
	return rc;
}
/**
 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for driver's RAS MBX command to the device.
 **/
static void
lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	mb = &pmb->u.mb;

	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"6188 FW LOG mailbox "
				"completed with status x%x add_status x%x,"
				" mbx status x%x\n",
				shdr_status, shdr_add_status, mb->mbxStatus);

		ras_fwlog->ras_hwsupport = false;
		goto disable_ras;
	}

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;

disable_ras:
	/* Free RAS DMA memory */
	lpfc_sli4_ras_dma_free(phba);
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @fwlog_level: Logging verbosity level.
 * @fwlog_enable: Enable/Disable logging.
 *
 * Initialize memory and post mailbox command to enable FW logging in host
 * memory.
 **/
int
lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
			 uint32_t fwlog_level,
			 uint32_t fwlog_enable)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
	int rc = 0;

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);

	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
			  phba->cfg_ras_fwlog_buffsize);
	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);

	/*
	 * If re-enabling FW logging support use earlier allocated
	 * DMA buffers while posting MBX command.
	 */
	if (!ras_fwlog->lwpd.virt) {
		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6189 FW Log Memory Allocation Failed");
			return rc;
		}
	}

	/* Setup Mailbox command */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6190 RAS MBX Alloc Failed");
		rc = -ENOMEM;
		goto mem_free;
	}

	ras_fwlog->fw_loglevel = fwlog_level;
	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
	       sizeof(struct lpfc_sli4_cfg_mhdr));

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
			 len, LPFC_SLI4_MBX_EMBED);

	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
	       fwlog_enable);
	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
	       ras_fwlog->fw_loglevel);
	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
	       ras_fwlog->fw_buffcount);
	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);

	/* Update DMA buffer address */
	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}

	/* Update LWPD address */
	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = REG_INPROGRESS;
	spin_unlock_irq(&phba->hbalock);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6191 FW-Log Mailbox failed. "
				"status %d mbxStatus : x%x", rc,
				bf_get(lpfc_mqe_status, &mbox->u.mqe));
		mempool_free(mbox, phba->mbox_mem_pool);
		rc = -EIO;
		goto mem_free;
	} else
		rc = 0;
mem_free:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}
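/*
 * Illustrative sketch (not driver code) of the buffer sizing used by
 * lpfc_sli4_ras_fwlog_init().  The module-parameter value below is an
 * assumption made only for the example:
 *
 *	phba->cfg_ras_fwlog_buffsize = 2		(hypothetical)
 *	fwlog_buffsize    = LPFC_RAS_MIN_BUFF_POST_SIZE * 2
 *	fwlog_entry_count = fwlog_buffsize / LPFC_RAS_MAX_ENTRY_SIZE
 *
 * The user-requested log area is carved into LPFC_RAS_MAX_ENTRY_SIZE
 * chunks, one DMA buffer per chunk, and each buffer's address is placed
 * in buff_fwlog[] of the SET_DIAG_LOG_OPTION mailbox above.
 */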
/**
 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
 * @phba: Pointer to HBA context object.
 *
 * Check if RAS is supported on the adapter and initialize it.
 **/
void
lpfc_sli4_ras_setup(struct lpfc_hba *phba)
{
	/* Check RAS FW Log needs to be enabled or not */
	if (lpfc_check_fwlog_support(phba))
		return;

	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
				 LPFC_RAS_ENABLE_LOGGING);
}
/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function allocates all SLI4 resource identifiers.
 **/
int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
{
	int i, rc, error = 0;
	uint16_t count, base;
	unsigned long longs;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	if (phba->sli4_hba.extents_in_use) {
		/*
		 * The port supports resource extents. The XRI, VPI, VFI, RPI
		 * resource extent count must be read and allocated before
		 * provisioning the resource id arrays.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			/*
			 * Extent-based resources are set - the driver could
			 * be in a port reset. Figure out if any corrective
			 * actions need to be taken.
			 */
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			if (rc != 0)
				error++;

			/*
			 * It's possible that the number of resources
			 * provided to this port instance changed between
			 * resets. Detect this condition and reallocate
			 * resources. Otherwise, there is no action.
			 */
			if (error) {
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_INIT,
						"2931 Detected extent resource "
						"change. Reallocating all "
						"extents.\n");
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			} else
				return 0;
		}

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		if (unlikely(rc))
			goto err_exit;
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return rc;
	} else {
		/*
		 * The port does not support resource extents.  The XRI, VPI,
		 * VFI, RPI resource ids were determined from READ_CONFIG.
		 * Just allocate the bitmasks and provision the resource id
		 * arrays.  If a port reset is active, the resources don't
		 * need any action - just exit.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			lpfc_sli4_dealloc_resource_identifiers(phba);
			lpfc_sli4_remove_rpis(phba);
		}
		/* RPIs. */
		count = phba->sli4_hba.max_cfg_param.max_rpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3279 Invalid provisioning of "
					"rpi:%d\n", count);
			rc = -EINVAL;
			goto err_exit;
		}
		base = phba->sli4_hba.max_cfg_param.rpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			rc = -ENOMEM;
			goto free_rpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.rpi_ids[i] = base + i;

		/* VPIs. */
		count = phba->sli4_hba.max_cfg_param.max_vpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3280 Invalid provisioning of "
					"vpi:%d\n", count);
			rc = -EINVAL;
			goto free_rpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto free_rpi_ids;
		}
		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			rc = -ENOMEM;
			goto free_vpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->vpi_ids[i] = base + i;

		/* XRIs. */
		count = phba->sli4_hba.max_cfg_param.max_xri;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3281 Invalid provisioning of "
					"xri:%d\n", count);
			rc = -EINVAL;
			goto free_vpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.xri_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto free_vpi_ids;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			rc = -ENOMEM;
			goto free_xri_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.xri_ids[i] = base + i;

		/* VFIs. */
		count = phba->sli4_hba.max_cfg_param.max_vfi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3282 Invalid provisioning of "
					"vfi:%d\n", count);
			rc = -EINVAL;
			goto free_xri_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vfi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto free_xri_ids;
		}
		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			rc = -ENOMEM;
			goto free_vfi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.vfi_ids[i] = base + i;

		/*
		 * Mark all resources ready.  An HBA reset doesn't need
		 * to reset the initialization.
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
 free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
 err_exit:
	return rc;
}
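/*
 * Illustrative sketch (not driver code) of the id provisioning above,
 * using hypothetical READ_CONFIG values:
 *
 *	rpi_base = 4000, max_rpi = 64		(assumed for the example)
 *	rpi_ids[0..63] = 4000 .. 4063
 *
 * Allocation paths elsewhere in the driver locate a free logical index
 * in rpi_bmask and then program rpi_ids[index] into the port, so the
 * bitmask tracks usage while the array performs the logical-to-physical
 * id translation.  The same pattern applies to the vpi/xri/vfi arrays.
 */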
/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function releases all SLI4 resource identifiers previously
 * provisioned for the port, whether extent-based or READ_CONFIG based.
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}
/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold port extent count response
 * @extnt_size: buffer to hold port extent size response.
 *
 * This function calls the port to read the host allocated extents
 * for a particular type.
 **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type. */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count.  The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox.  The uint32_t
	 * accounts for extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
	 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = curr_blks * sizeof(uint16_t);
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2983 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located.  Then get local pointers
	 * to the response data.  The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"2984 Failed to read allocated resources "
			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
			type,
			bf_get(lpfc_mbox_hdr_status, &shdr->response),
			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
		goto err_exit;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
/**
 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked link of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * repost them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of buffer sgls which contains contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For single
 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
 * mailbox command for posting.
 *
 * Returns: 0 = success, non-zero failure.
 **/
static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
			  struct list_head *sgl_list, int cnt)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt;
	int post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(sgl_list, &allc_sgl_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* keep track of last sgl's xritag */
		last_xritag = sglq_entry->sli4_xritag;

		/* end of repost sgl list condition for buffers */
		if (num_posted == total_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* successful, put sgl to posted list */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure, put sgl to free list */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post the buffer list sgls as a block */
		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
						 post_cnt);

		if (!status) {
			/* success, put sgl list to posted sgl list */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure, put sgl list to free sgl list */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset sgl post count for next round of posting */
		post_cnt = 0;
	}

	/* free the sgls failed to post */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* push sgls posted to the available list */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&post_sgl_list, sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3161 Failure to post sgl to port.\n");
		return -EIO;
	}

	/* return the number of XRIs actually posted */
	return total_cnt;
}
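/*
 * Illustrative sketch (not driver code) of the block-building loop in
 * lpfc_sli4_repost_sgl_list(), with hypothetical xritags on the list:
 *
 *	100, 101, 102, 200, 201
 *
 * 100-102 are contiguous, so they are spliced into blck_sgl_list and
 * posted together with one non-embedded SGL block post.  The jump to
 * 200 is "a hole in xri block": 200 starts a new prep list, and 200-201
 * go out as the next block (or singly via lpfc_sli4_post_sgl() when the
 * final block contains only one entry).
 */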
/**
 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of nvme buffers that have been allocated and
 * repost them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
 * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
static int
lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_nblist);
	int num_posted, rc = 0;

	/* get all NVME buffers need to repost to a local list */
	lpfc_io_buf_flush(phba, &post_nblist);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist)) {
		num_posted = lpfc_sli4_post_io_sgl_list(
			phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
		/* failed to post any nvme buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}
static void
lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_host_data) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
			 LPFC_SLI4_MBX_EMBED);

	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
	mbox->u.mqe.un.set_host_data.param_len =
					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
	snprintf(mbox->u.mqe.un.set_host_data.data,
		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
		 "Linux %s v"LPFC_DRIVER_VERSION,
		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
}
int
lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		    struct lpfc_queue *drq, int count, int idx)
{
	int rc, i;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_rqb *rqbp;
	unsigned long flags;
	struct rqb_dmabuf *rqb_buffer;
	LIST_HEAD(rqb_buf_list);

	spin_lock_irqsave(&phba->hbalock, flags);
	rqbp = hrq->rqbp;
	for (i = 0; i < count; i++) {
		/* IF RQ is already full, don't bother */
		if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
			break;
		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
		if (!rqb_buffer)
			break;
		rqb_buffer->hrq = hrq;
		rqb_buffer->drq = drq;
		rqb_buffer->idx = idx;
		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
	}
	while (!list_empty(&rqb_buf_list)) {
		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
				 hbuf.list);

		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
		if (rc < 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6421 Cannot post to HRQ %d: %x %x %x "
					"DRQ %x %x\n",
					hrq->queue_id,
					hrq->host_index,
					hrq->hba_index,
					hrq->entry_count,
					drq->host_index,
					drq->hba_index);
			rqbp->rqb_free_buffer(phba, rqb_buffer);
		} else {
			list_add_tail(&rqb_buffer->hbuf.list,
				      &rqbp->rqb_buffer_list);
			rqbp->buffer_count++;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}
/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc, i, cnt, len, dd;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;
	struct lpfc_rqb *rqbp;

	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readiness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
		phba->hba_flag |= HBA_FCOE_MODE;
		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
	} else {
		phba->hba_flag &= ~HBA_FCOE_MODE;
	}

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	phba->hba_flag &= ~HBA_IOQ_FLUSH;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions, only read parameters if the
	 * board is FCoE
	 */
	if (phba->hba_flag & HBA_FCOE_MODE &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/*
	 * Retrieve sli4 device physical port name, failure of doing it
	 * is considered as non-fatal.
	 */
	rc = lpfc_sli4_retrieve_pport_name(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"3080 Successful retrieving SLI4 device "
				"physical port name: %s.\n", phba->Port);

	rc = lpfc_sli4_get_ctl_attr(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"8351 Successful retrieving SLI4 device "
				"CTL ATTR\n");

	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;

	/*
	 * This is because first G7 ASIC doesn't support the standard
	 * 0x5a NVME cmd descriptor type/subtype
	 */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
			LPFC_SLI_INTF_IF_TYPE_6) &&
	    (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
	    (phba->vpd.rev.smRev == 0) &&
	    (phba->cfg_nvme_embed_cmd == 1))
		phba->cfg_nvme_embed_cmd = 0;

	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					 &mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3362 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, rc);
		phba->pport->cfg_lun_queue_depth = rc;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS) {
			phba->hba_flag |= HBA_RECOVERABLE_UE;
			/* Set 1Sec interval to detect UE */
			phba->eratt_poll_interval = 1;
			phba->sli4_hba.ue_to_sr = bf_get(
					lpfc_mbx_set_feature_UESR,
					&mboxq->u.mqe.un.set_feature);
			phba->sli4_hba.ue_to_rp = bf_get(
					lpfc_mbx_set_feature_UERP,
					&mboxq->u.mqe.un.set_feature);
		}
	}

	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
		/* Enable MDS Diagnostics only if the SLI Port supports it */
		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			phba->mds_diags_support = 0;
	}

	/*
	 * Discover the port's supported feature set and match it against the
	 * host's requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}

	/* Performance Hints are ONLY for FCoE */
	if (phba->hba_flag & HBA_FCOE_MODE) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
		else
			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	}

	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver.  This is not a fatal error.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
			phba->cfg_enable_bg = 0;
			phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
			ftr_rsp++;
		}
	}

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/* Always try to enable dual dump feature if we can */
	lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
	if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT,
				"6448 Dual Dump is enabled\n");
	else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
				"6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x dd:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(
					phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(
					phba, mboxq),
				rc, dd);
	/*
	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
	 * calls depends on these resources to complete port setup.
	 */
	rc = lpfc_sli4_alloc_resource_identifiers(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2920 Failed to alloc Resource IDs "
				"rc = x%x\n", rc);
		goto out_free_mbox;
	}

	lpfc_set_host_data(phba, mboxq);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"2134 Failed to set host os driver version %x",
				rc);
	}

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->ctx_buf = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Create all the SLI4 queues */
	rc = lpfc_sli4_queue_create(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3089 Failed to allocate queues\n");
		rc = -ENODEV;
		goto out_free_mbox;
	}
	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n", rc);
		goto out_stop_timers;
	}
	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli4_setup(phba);
	lpfc_sli4_queue_init(phba);

	/* update host els xri-sgl sizes and mappings */
	rc = lpfc_sli4_els_sgl_update(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"1400 Failed to update xri-sgl size and "
				"mapping: %d\n", rc);
		goto out_destroy_queue;
	}

	/* register the els sgl pool to the port */
	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
				       phba->sli4_hba.els_xri_cnt);
	if (unlikely(rc < 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during els sgl post "
				"operation\n", rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	phba->sli4_hba.els_xri_cnt = rc;

	if (phba->nvmet_support) {
		/* update host nvmet xri-sgl sizes and mappings */
		rc = lpfc_sli4_nvmet_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6308 Failed to update nvmet-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		/* register the nvmet sgl pool to the port */
		rc = lpfc_sli4_repost_sgl_list(
			phba,
			&phba->sli4_hba.lpfc_nvmet_sgl_list,
			phba->sli4_hba.nvmet_xri_cnt);
		if (unlikely(rc < 0)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"3117 Error %d during nvmet "
					"sgl post\n", rc);
			rc = -ENODEV;
			goto out_destroy_queue;
		}
		phba->sli4_hba.nvmet_xri_cnt = rc;

		/* We allocate an iocbq for every receive context SGL.
		 * The additional allocation is for abort and ls handling.
		 */
		cnt = phba->sli4_hba.nvmet_xri_cnt +
			phba->sli4_hba.max_cfg_param.max_xri;
	} else {
		/* update host common xri-sgl sizes and mappings */
		rc = lpfc_sli4_io_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6082 Failed to update nvme-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		/* register the allocated common sgl pool to the port */
		rc = lpfc_sli4_repost_io_sgl_list(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"6116 Error %d during nvme sgl post "
					"operation\n", rc);
			/* Some NVME buffers were moved to abort nvme list */
			/* A pci function reset will repost them */
			rc = -ENODEV;
			goto out_destroy_queue;
		}
		/* Each lpfc_io_buf job structure has an iocbq element.
		 * This cnt provides for abort, els, ct and ls requests.
		 */
		cnt = phba->sli4_hba.max_cfg_param.max_xri;
	}

	if (!phba->sli.iocbq_lookup) {
		/* Initialize and populate the iocb list per host */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2821 initialize iocb list with %d entries\n",
				cnt);
		rc = lpfc_init_iocb_list(phba, cnt);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1413 Failed to init iocb list.\n");
			goto out_destroy_queue;
		}
	}

	if (phba->nvmet_support)
		lpfc_nvmet_create_targetport(phba);

	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
		/* Post initial buffers to all RQs created */
		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
			rqbp->buffer_count = 0;

			lpfc_post_rq_buffer(
				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
				phba->sli4_hba.nvmet_mrq_data[i],
				phba->cfg_nvmet_mrq_post, i);
		}
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	lpfc_sli4_node_prep(phba);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
			/*
			 * The FC Port needs to register FCFI (index 0)
			 */
			lpfc_reg_fcfi(phba, mboxq);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
						&mboxq->u.mqe.un.reg_fcfi);
		} else {
			/* We are a NVME Target mode with MRQ > 1 */

			/* First register the FCFI */
			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
						&mboxq->u.mqe.un.reg_fcfi_mrq);

			/* Next register the MRQs */
			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
		}
		/* Check if the port is configured to be disabled */
		lpfc_sli_read_link_ste(phba);
	}

	/* Don't post more new bufs if repost already recovered
	 * the nvme sgls.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->sli4_hba.io_xri_cnt == 0) {
			len = lpfc_new_io_buf(
					      phba, phba->sli4_hba.io_xri_max);
			if (len == 0) {
				rc = -ENOMEM;
				goto out_unset_queue;
			}

			if (phba->cfg_xri_rebalancing)
				lpfc_create_multixri_pools(phba);
		}
	} else {
		phba->cfg_xri_rebalancing = 0;
	}

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* start eq_delay heartbeat */
	if (phba->cfg_auto_imax)
		queue_delayed_work(phba->wq, &phba->eq_delay_work,
				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
		rc = 0;
	}

	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;

	/* Check if physical ports are trunked */
	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->hba_flag & LINK_DISABLED)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
				"3103 Adapter Link is disabled.\n");
		lpfc_down_link(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
					"3104 Adapter failed to issue "
					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
			goto out_io_buff_free;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* don't perform init_link on SLI4 FC port loopback test */
		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
			if (rc)
				goto out_io_buff_free;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
out_io_buff_free:
	/* Free allocated IO Buffers */
	lpfc_io_free(phba);
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	lpfc_sli4_queue_unset(phba);
out_destroy_queue:
	lpfc_free_iocb_list(phba);
	lpfc_sli4_queue_destroy(phba);
out_stop_timers:
	lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @t: context object - pointer to the timer embedded in the hba structure.
 *
 * This is the callback function for mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox complete. The function is called by
 * the kernel timer code when a mailbox does not complete within
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(struct timer_list *t)
{
	struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
 * are pending
 * @phba: Pointer to HBA context object.
 *
 * This function checks if any mailbox completions are present on the mailbox
 * completion queue.
 **/
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{
	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;
	uint8_t qe_valid;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on mailbox completion queue */

	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	qe_valid = mcq->qe_valid;
	while (bf_get_le32(lpfc_cqe_valid,
	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		if (mcq->hba_index == idx)
			break;

		/* if the index wrapped around, toggle the valid bit */
		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
			qe_valid = (qe_valid) ? 0 : 1;
	}
	return pending_completions;
}
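
/*
 * Note: a CQE is considered owned by the driver while its valid bit matches
 * the queue's expected qe_valid value. On ports that report cqav (valid-bit
 * aware CQs), the expected value toggles each time the scan wraps past the
 * end of the queue, so stale entries from the previous pass are not re-read.
 */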
/**
 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
 * @phba: Pointer to HBA context object.
 *
 * For sli4, it is possible to miss an interrupt. As such mbox completions
 * maybe missed causing erroneous mailbox timeouts to occur. This function
 * checks to see if mbox completions are on the mailbox completion queue
 * and will process all the completions associated with the eq for the
 * mailbox completion queue.
 **/
static bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_queue *eq;
	bool mbox_pending;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the EQ associated with the mbox CQ */
	if (sli4_hba->hdwq) {
		for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
			if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
				fpeq = eq;
				break;
			}
		}
	}
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ */

	sli4_hba->sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending */

	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on EQ
	 * associated with the mbox completion queue (this could include
	 * mailbox commands, async events, els commands, receive queue data
	 * and so on).
	 */

	if (mbox_pending)
		/* process and rearm the EQ */
		lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
	else
		/* Always clear and re-arm the EQ */
		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);

	return mbox_pending;
}
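
/*
 * Note: this path exists because an EQ interrupt can occasionally be lost.
 * Rather than letting the mailbox timer fire spuriously, the timeout code
 * calls this routine first to poll the EQ that serves the mailbox CQ and
 * drain any completions that are already posted there; the EQ is always
 * rearmed afterwards, whether or not a mailbox completion was found.
 */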
/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from worker thread when a mailbox command times out.
 * The caller is not required to hold any locks. This function will reset the
 * HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;

	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed, process the completion and return */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox need to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and returns immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
 * The sli layer owns the mailbox object until the completion of mailbox
 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
		    !(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2528 Mailbox command x%x cannot "
					"issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand,
				phba->pport ? phba->pport->port_state : 0xff,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
					      LPFC_DISC_TRC_MBOX_VPORT,
					      "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0], mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					      LPFC_DISC_TRC_MBOX,
					      "MBOX Bsy: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0], mbx->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000);
		mod_timer(&psli->mbox_tmo, jiffies + timeout);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand,
			phba->pport ? phba->pport->port_state : 0xff,
			psli->sli_flag, flag);

	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
					      LPFC_DISC_TRC_MBOX_VPORT,
					      "MBOX Send vport: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0], mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					      LPFC_DISC_TRC_MBOX,
					      "MBOX Send: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0], mbx->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
			lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
					      (uint8_t *)phba->mbox_ext,
					      pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
			lpfc_memcpy_to_slim(phba->MBslimaddr +
					    MAILBOX_HBA_EXT_OFFSET,
					    pmbox->ctx_buf, pmbox->in_ext_byte_len);

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
					      MAILBOX_CMD_SIZE);

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
	}

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
					       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) & slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
							~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->ctx_buf,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_memcpy_from_slim(
					pmbox->ctx_buf,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
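
/*
 * Illustrative caller-side sketch (not part of this file) of the ownership
 * rules documented above: on MBX_BUSY or MBX_SUCCESS the SLI layer keeps the
 * mailbox until its completion handler runs; on any other return code the
 * caller must release it. my_cmpl is a hypothetical completion handler used
 * only for illustration.
 *
 *	int rc;
 *	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, mbox);		// build the mailbox command
 *	mbox->mbox_cmpl = my_cmpl;		// called when the command completes
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
 *	if (rc != MBX_BUSY && rc != MBX_SUCCESS)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */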
/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out the
 * possible outstanding mailbox command before return.
 *
 * Returns:
 *	0 - the outstanding mailbox command completed; otherwise, the wait for
 *	the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Make sure the mailbox is really active */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */
			rc = 1;
			break;
		}
	}

	/* Can not cleanly block async mailbox command, fail it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Outstanding synchronous mailbox command is guaranteed to be done,
	 * successful or timeout, after timing-out the outstanding mailbox
	 * command shall always be removed, so just unblock posting async
	 * mailbox command and resume
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function waits for the bootstrap mailbox register ready bit from
 * port for twice the regular mailbox command timeout value.
 *
 *	0 - no timeout on waiting for bootstrap mailbox register ready.
 *	MBXERR_ERROR - wait for bootstrap mailbox register timed out.
 **/
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t db_ready;
	unsigned long timeout;
	struct lpfc_register bmbx_reg;

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
				   * 1000) + jiffies;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			mdelay(2);

		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
	} while (!db_ready);

	return 0;
}
/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port.  The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 *	MBX_SUCCESS - mailbox posted successfully
 *	Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for bootstrap mbox register for readyness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post.  Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			       sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			       sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			       sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

exit:
	/* We are holding the token, no need for lock on release */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
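
/*
 * Note on the bootstrap mailbox handshake above: the BMBX region is first
 * cleared and loaded with the caller's MQE, then the high half and the low
 * half of its DMA address are written to BMBXregaddr in turn, with a wait
 * for the ready bit after each write. Only after the second wait succeeds
 * are the MQE and MCQE copied back so the caller sees the final status.
 */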
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox need to be processed.
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with SLI-4 interface spec.
 *
 * Return codes the caller owns the mailbox command after the return of the
 * function.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command if setup */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
						LOG_MBOX | LOG_SLI,
						"(%d):2597 Sync Mailbox command "
						"x%x (x%x/x%x) failure: "
						"mqe_sta: x%x mcqe_sta: x%x/x%x "
						"Data: x%x x%x\n",
						mboxq->vport ? mboxq->vport->vpi : 0,
						mboxq->u.mb.mbxCommand,
						lpfc_sli_config_mbox_subsys_get(phba,
										mboxq),
						lpfc_sli_config_mbox_opcode_get(phba,
										mboxq),
						bf_get(lpfc_mqe_status, &mboxq->u.mqe),
						bf_get(lpfc_mcqe_status, &mboxq->mcqe),
						bf_get(lpfc_mcqe_ext_status,
						       &mboxq->mcqe),
						psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
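
/*
 * Note: lpfc_sli_issue_mbox_s4() therefore has three distinct paths - a
 * direct bootstrap-mailbox poll when interrupts are not yet enabled, a
 * "block async posting, then poll" path for MBX_POLL requests issued while
 * the port is interrupt driven, and the default path that simply enqueues
 * the command and lets the worker thread post it asynchronously.
 */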
/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 *
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
					      LPFC_DISC_TRC_MBOX_VPORT,
					      "MBOX Send vport: cmd:x%x mb:x%x x%x",
					      mbx_cmnd, mqe->un.mb_words[0],
					      mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					      LPFC_DISC_TRC_MBOX,
					      "MBOX Send: cmd:x%x mb:x%x x%x",
					      mbx_cmnd, mqe->un.mb_words[0],
					      mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
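
/*
 * Note: LPFC_SLI_MBOX_ACTIVE acts as the single service token for the SLI4
 * mailbox queue. lpfc_sli4_post_async_mbox() only dequeues and posts a
 * command after taking the token; the token is released again when the
 * completion is processed or, on failure, by the out_not_finished path above.
 */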
/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox need to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes the caller owns the mailbox command after the return of the
 * function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}

/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports to add a command
 * iocb to the txq when SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
}

/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. This function checks
 * txq to flush the iocbs in txq to Firmware before
 * submitting new iocbs to the Firmware.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL. Caller needs to check
 * *piocb to find if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
 * this function allows only iocbs for posting buffers. This function finds
 * next available slot in the command ring and posts the command to the
 * available slot and writes the port attention register to request HBA start
 * processing new iocb. If there is no slot available in the ring and
 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
 * the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submit the iocb to firmware or after adding to the
 * txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of a
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
				FC_RCTL_DD_UNSOL_CMD) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Type !=
				MENLO_TRANSPORT_TYPE))
				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/* fall through */
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
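
/*
 * Note: when the ring is full, SLI_IOCB_RET_IOCB selects the caller's
 * preference - without it the iocb is parked on the txq and IOCB_SUCCESS is
 * returned (it will be submitted later when slots free up); with it the
 * iocb is handed back with IOCB_BUSY so the caller can retry or fail it.
 */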
/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the IOCB contains a single
 * BDE then it is converted to a single sli_sge.
 * The IOCB is still in cpu endianess so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		  struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
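
/*
 * Note on byte order in the conversion above: the IOCB/BPL contents are
 * still in CPU byte order at this point, while the hardware SGE is defined
 * little endian. word2 is therefore pulled into CPU order with le32_to_cpu()
 * before the bf_set() bit-field updates and pushed back with cpu_to_le32(),
 * and the BDE length is converted before being stored in sge_len.
 */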
/**
 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry.
 *
 * This routine converts the iocb command to its Work Queue Entry
 * equivalent. The wqe pointer should not have any fields set when
 * this routine is called because it will memcpy over them.
 * This routine does not set the CQ_ID or the WQEC bits in the
 * wqe.
 *
 * Returns: 0 = Success, IOCB_ERROR = Failure.
 **/
static int
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		   union lpfc_wqe128 *wqe)
{
9369 uint32_t xmit_len
= 0, total_len
= 0;
9373 uint8_t command_type
= ELS_COMMAND_NON_FIP
;
9376 uint16_t abrt_iotag
;
9377 struct lpfc_iocbq
*abrtiocbq
;
9378 struct ulp_bde64
*bpl
= NULL
;
9379 uint32_t els_id
= LPFC_ELS_ID_DEFAULT
;
9381 struct ulp_bde64 bde
;
9382 struct lpfc_nodelist
*ndlp
;
9386 fip
= phba
->hba_flag
& HBA_FIP_SUPPORT
;
9387 /* The fcp commands will set command type */
9388 if (iocbq
->iocb_flag
& LPFC_IO_FCP
)
9389 command_type
= FCP_COMMAND
;
9390 else if (fip
&& (iocbq
->iocb_flag
& LPFC_FIP_ELS_ID_MASK
))
9391 command_type
= ELS_COMMAND_FIP
;
9393 command_type
= ELS_COMMAND_NON_FIP
;
9395 if (phba
->fcp_embed_io
)
9396 memset(wqe
, 0, sizeof(union lpfc_wqe128
));
9397 /* Some of the fields are in the right position already */
9398 memcpy(wqe
, &iocbq
->iocb
, sizeof(union lpfc_wqe
));
9399 /* The ct field has moved so reset */
9400 wqe
->generic
.wqe_com
.word7
= 0;
9401 wqe
->generic
.wqe_com
.word10
= 0;
9403 abort_tag
= (uint32_t) iocbq
->iotag
;
9404 xritag
= iocbq
->sli4_xritag
;
9405 /* words0-2 bpl convert bde */
9406 if (iocbq
->iocb
.un
.genreq64
.bdl
.bdeFlags
== BUFF_TYPE_BLP_64
) {
9407 numBdes
= iocbq
->iocb
.un
.genreq64
.bdl
.bdeSize
/
9408 sizeof(struct ulp_bde64
);
9409 bpl
= (struct ulp_bde64
*)
9410 ((struct lpfc_dmabuf
*)iocbq
->context3
)->virt
;
9414 /* Should already be byte swapped. */
9415 wqe
->generic
.bde
.addrHigh
= le32_to_cpu(bpl
->addrHigh
);
9416 wqe
->generic
.bde
.addrLow
= le32_to_cpu(bpl
->addrLow
);
9417 /* swap the size field back to the cpu so we
9418 * can assign it to the sgl.
9420 wqe
->generic
.bde
.tus
.w
= le32_to_cpu(bpl
->tus
.w
);
9421 xmit_len
= wqe
->generic
.bde
.tus
.f
.bdeSize
;
9423 for (i
= 0; i
< numBdes
; i
++) {
9424 bde
.tus
.w
= le32_to_cpu(bpl
[i
].tus
.w
);
9425 total_len
+= bde
.tus
.f
.bdeSize
;
9428 xmit_len
= iocbq
->iocb
.un
.fcpi64
.bdl
.bdeSize
;
9430 iocbq
->iocb
.ulpIoTag
= iocbq
->iotag
;
9431 cmnd
= iocbq
->iocb
.ulpCommand
;
9433 switch (iocbq
->iocb
.ulpCommand
) {
9434 case CMD_ELS_REQUEST64_CR
:
9435 if (iocbq
->iocb_flag
& LPFC_IO_LIBDFC
)
9436 ndlp
= iocbq
->context_un
.ndlp
;
9438 ndlp
= (struct lpfc_nodelist
*)iocbq
->context1
;
9439 if (!iocbq
->iocb
.ulpLe
) {
9440 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
9441 "2007 Only Limited Edition cmd Format"
9442 " supported 0x%x\n",
9443 iocbq
->iocb
.ulpCommand
);
9447 wqe
->els_req
.payload_len
= xmit_len
;
9448 /* Els_reguest64 has a TMO */
9449 bf_set(wqe_tmo
, &wqe
->els_req
.wqe_com
,
9450 iocbq
->iocb
.ulpTimeout
);
9451 /* Need a VF for word 4 set the vf bit*/
9452 bf_set(els_req64_vf
, &wqe
->els_req
, 0);
9453 /* And a VFID for word 12 */
9454 bf_set(els_req64_vfid
, &wqe
->els_req
, 0);
9455 ct
= ((iocbq
->iocb
.ulpCt_h
<< 1) | iocbq
->iocb
.ulpCt_l
);
9456 bf_set(wqe_ctxt_tag
, &wqe
->els_req
.wqe_com
,
9457 iocbq
->iocb
.ulpContext
);
9458 bf_set(wqe_ct
, &wqe
->els_req
.wqe_com
, ct
);
9459 bf_set(wqe_pu
, &wqe
->els_req
.wqe_com
, 0);
9460 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9461 if (command_type
== ELS_COMMAND_FIP
)
9462 els_id
= ((iocbq
->iocb_flag
& LPFC_FIP_ELS_ID_MASK
)
9463 >> LPFC_FIP_ELS_ID_SHIFT
);
9464 pcmd
= (uint32_t *) (((struct lpfc_dmabuf
*)
9465 iocbq
->context2
)->virt
);
9466 if_type
= bf_get(lpfc_sli_intf_if_type
,
9467 &phba
->sli4_hba
.sli_intf
);
9468 if (if_type
>= LPFC_SLI_INTF_IF_TYPE_2
) {
9469 if (pcmd
&& (*pcmd
== ELS_CMD_FLOGI
||
9470 *pcmd
== ELS_CMD_SCR
||
9471 *pcmd
== ELS_CMD_RSCN_XMT
||
9472 *pcmd
== ELS_CMD_FDISC
||
9473 *pcmd
== ELS_CMD_LOGO
||
9474 *pcmd
== ELS_CMD_PLOGI
)) {
9475 bf_set(els_req64_sp
, &wqe
->els_req
, 1);
9476 bf_set(els_req64_sid
, &wqe
->els_req
,
9477 iocbq
->vport
->fc_myDID
);
9478 if ((*pcmd
== ELS_CMD_FLOGI
) &&
9479 !(phba
->fc_topology
==
9480 LPFC_TOPOLOGY_LOOP
))
9481 bf_set(els_req64_sid
, &wqe
->els_req
, 0);
9482 bf_set(wqe_ct
, &wqe
->els_req
.wqe_com
, 1);
9483 bf_set(wqe_ctxt_tag
, &wqe
->els_req
.wqe_com
,
9484 phba
->vpi_ids
[iocbq
->vport
->vpi
]);
9485 } else if (pcmd
&& iocbq
->context1
) {
9486 bf_set(wqe_ct
, &wqe
->els_req
.wqe_com
, 0);
9487 bf_set(wqe_ctxt_tag
, &wqe
->els_req
.wqe_com
,
9488 phba
->sli4_hba
.rpi_ids
[ndlp
->nlp_rpi
]);
9491 bf_set(wqe_temp_rpi
, &wqe
->els_req
.wqe_com
,
9492 phba
->sli4_hba
.rpi_ids
[ndlp
->nlp_rpi
]);
9493 bf_set(wqe_els_id
, &wqe
->els_req
.wqe_com
, els_id
);
9494 bf_set(wqe_dbde
, &wqe
->els_req
.wqe_com
, 1);
9495 bf_set(wqe_iod
, &wqe
->els_req
.wqe_com
, LPFC_WQE_IOD_READ
);
9496 bf_set(wqe_qosd
, &wqe
->els_req
.wqe_com
, 1);
9497 bf_set(wqe_lenloc
, &wqe
->els_req
.wqe_com
, LPFC_WQE_LENLOC_NONE
);
9498 bf_set(wqe_ebde_cnt
, &wqe
->els_req
.wqe_com
, 0);
9499 wqe
->els_req
.max_response_payload_len
= total_len
- xmit_len
;
9501 case CMD_XMIT_SEQUENCE64_CX
:
9502 bf_set(wqe_ctxt_tag
, &wqe
->xmit_sequence
.wqe_com
,
9503 iocbq
->iocb
.un
.ulpWord
[3]);
9504 bf_set(wqe_rcvoxid
, &wqe
->xmit_sequence
.wqe_com
,
9505 iocbq
->iocb
.unsli3
.rcvsli3
.ox_id
);
9506 /* The entire sequence is transmitted for this IOCB */
9507 xmit_len
= total_len
;
9508 cmnd
= CMD_XMIT_SEQUENCE64_CR
;
9509 if (phba
->link_flag
& LS_LOOPBACK_MODE
)
9510 bf_set(wqe_xo
, &wqe
->xmit_sequence
.wge_ctl
, 1);
9512 case CMD_XMIT_SEQUENCE64_CR
:
9513 /* word3 iocb=io_tag32 wqe=reserved */
9514 wqe
->xmit_sequence
.rsvd3
= 0;
9515 /* word4 relative_offset memcpy */
9516 /* word5 r_ctl/df_ctl memcpy */
9517 bf_set(wqe_pu
, &wqe
->xmit_sequence
.wqe_com
, 0);
9518 bf_set(wqe_dbde
, &wqe
->xmit_sequence
.wqe_com
, 1);
9519 bf_set(wqe_iod
, &wqe
->xmit_sequence
.wqe_com
,
9520 LPFC_WQE_IOD_WRITE
);
9521 bf_set(wqe_lenloc
, &wqe
->xmit_sequence
.wqe_com
,
9522 LPFC_WQE_LENLOC_WORD12
);
9523 bf_set(wqe_ebde_cnt
, &wqe
->xmit_sequence
.wqe_com
, 0);
9524 wqe
->xmit_sequence
.xmit_len
= xmit_len
;
9525 command_type
= OTHER_COMMAND
;
9527 case CMD_XMIT_BCAST64_CN
:
9528 /* word3 iocb=iotag32 wqe=seq_payload_len */
9529 wqe
->xmit_bcast64
.seq_payload_len
= xmit_len
;
9530 /* word4 iocb=rsvd wqe=rsvd */
9531 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9532 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9533 bf_set(wqe_ct
, &wqe
->xmit_bcast64
.wqe_com
,
9534 ((iocbq
->iocb
.ulpCt_h
<< 1) | iocbq
->iocb
.ulpCt_l
));
9535 bf_set(wqe_dbde
, &wqe
->xmit_bcast64
.wqe_com
, 1);
9536 bf_set(wqe_iod
, &wqe
->xmit_bcast64
.wqe_com
, LPFC_WQE_IOD_WRITE
);
9537 bf_set(wqe_lenloc
, &wqe
->xmit_bcast64
.wqe_com
,
9538 LPFC_WQE_LENLOC_WORD3
);
9539 bf_set(wqe_ebde_cnt
, &wqe
->xmit_bcast64
.wqe_com
, 0);
9541 case CMD_FCP_IWRITE64_CR
:
9542 command_type
= FCP_COMMAND_DATA_OUT
;
9543 /* word3 iocb=iotag wqe=payload_offset_len */
9544 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9545 bf_set(payload_offset_len
, &wqe
->fcp_iwrite
,
9546 xmit_len
+ sizeof(struct fcp_rsp
));
9547 bf_set(cmd_buff_len
, &wqe
->fcp_iwrite
,
9549 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9550 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9551 bf_set(wqe_erp
, &wqe
->fcp_iwrite
.wqe_com
,
9552 iocbq
->iocb
.ulpFCP2Rcvy
);
9553 bf_set(wqe_lnk
, &wqe
->fcp_iwrite
.wqe_com
, iocbq
->iocb
.ulpXS
);
9554 /* Always open the exchange */
9555 bf_set(wqe_iod
, &wqe
->fcp_iwrite
.wqe_com
, LPFC_WQE_IOD_WRITE
);
9556 bf_set(wqe_lenloc
, &wqe
->fcp_iwrite
.wqe_com
,
9557 LPFC_WQE_LENLOC_WORD4
);
9558 bf_set(wqe_pu
, &wqe
->fcp_iwrite
.wqe_com
, iocbq
->iocb
.ulpPU
);
9559 bf_set(wqe_dbde
, &wqe
->fcp_iwrite
.wqe_com
, 1);
9560 if (iocbq
->iocb_flag
& LPFC_IO_OAS
) {
9561 bf_set(wqe_oas
, &wqe
->fcp_iwrite
.wqe_com
, 1);
9562 bf_set(wqe_ccpe
, &wqe
->fcp_iwrite
.wqe_com
, 1);
9563 if (iocbq
->priority
) {
9564 bf_set(wqe_ccp
, &wqe
->fcp_iwrite
.wqe_com
,
9565 (iocbq
->priority
<< 1));
9567 bf_set(wqe_ccp
, &wqe
->fcp_iwrite
.wqe_com
,
9568 (phba
->cfg_XLanePriority
<< 1));
9571 /* Note, word 10 is already initialized to 0 */
9573 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9574 if (phba
->cfg_enable_pbde
)
9575 bf_set(wqe_pbde
, &wqe
->fcp_iwrite
.wqe_com
, 1);
9577 bf_set(wqe_pbde
, &wqe
->fcp_iwrite
.wqe_com
, 0);
9579 if (phba
->fcp_embed_io
) {
9580 struct lpfc_io_buf
*lpfc_cmd
;
9581 struct sli4_sge
*sgl
;
9582 struct fcp_cmnd
*fcp_cmnd
;
9585 /* 128 byte wqe support here */
9587 lpfc_cmd
= iocbq
->context1
;
9588 sgl
= (struct sli4_sge
*)lpfc_cmd
->dma_sgl
;
9589 fcp_cmnd
= lpfc_cmd
->fcp_cmnd
;
9591 /* Word 0-2 - FCP_CMND */
9592 wqe
->generic
.bde
.tus
.f
.bdeFlags
=
9593 BUFF_TYPE_BDE_IMMED
;
9594 wqe
->generic
.bde
.tus
.f
.bdeSize
= sgl
->sge_len
;
9595 wqe
->generic
.bde
.addrHigh
= 0;
9596 wqe
->generic
.bde
.addrLow
= 88; /* Word 22 */
9598 bf_set(wqe_wqes
, &wqe
->fcp_iwrite
.wqe_com
, 1);
9599 bf_set(wqe_dbde
, &wqe
->fcp_iwrite
.wqe_com
, 0);
9601 /* Word 22-29 FCP CMND Payload */
9602 ptr
= &wqe
->words
[22];
9603 memcpy(ptr
, fcp_cmnd
, sizeof(struct fcp_cmnd
));
9606 case CMD_FCP_IREAD64_CR
:
9607 /* word3 iocb=iotag wqe=payload_offset_len */
9608 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9609 bf_set(payload_offset_len
, &wqe
->fcp_iread
,
9610 xmit_len
+ sizeof(struct fcp_rsp
));
9611 bf_set(cmd_buff_len
, &wqe
->fcp_iread
,
9613 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9614 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9615 bf_set(wqe_erp
, &wqe
->fcp_iread
.wqe_com
,
9616 iocbq
->iocb
.ulpFCP2Rcvy
);
9617 bf_set(wqe_lnk
, &wqe
->fcp_iread
.wqe_com
, iocbq
->iocb
.ulpXS
);
9618 /* Always open the exchange */
9619 bf_set(wqe_iod
, &wqe
->fcp_iread
.wqe_com
, LPFC_WQE_IOD_READ
);
9620 bf_set(wqe_lenloc
, &wqe
->fcp_iread
.wqe_com
,
9621 LPFC_WQE_LENLOC_WORD4
);
9622 bf_set(wqe_pu
, &wqe
->fcp_iread
.wqe_com
, iocbq
->iocb
.ulpPU
);
9623 bf_set(wqe_dbde
, &wqe
->fcp_iread
.wqe_com
, 1);
9624 if (iocbq
->iocb_flag
& LPFC_IO_OAS
) {
9625 bf_set(wqe_oas
, &wqe
->fcp_iread
.wqe_com
, 1);
9626 bf_set(wqe_ccpe
, &wqe
->fcp_iread
.wqe_com
, 1);
9627 if (iocbq
->priority
) {
9628 bf_set(wqe_ccp
, &wqe
->fcp_iread
.wqe_com
,
9629 (iocbq
->priority
<< 1));
9631 bf_set(wqe_ccp
, &wqe
->fcp_iread
.wqe_com
,
9632 (phba
->cfg_XLanePriority
<< 1));
9635 /* Note, word 10 is already initialized to 0 */
9637 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9638 if (phba
->cfg_enable_pbde
)
9639 bf_set(wqe_pbde
, &wqe
->fcp_iread
.wqe_com
, 1);
9641 bf_set(wqe_pbde
, &wqe
->fcp_iread
.wqe_com
, 0);
9643 if (phba
->fcp_embed_io
) {
9644 struct lpfc_io_buf
*lpfc_cmd
;
9645 struct sli4_sge
*sgl
;
9646 struct fcp_cmnd
*fcp_cmnd
;
9649 /* 128 byte wqe support here */
9651 lpfc_cmd
= iocbq
->context1
;
9652 sgl
= (struct sli4_sge
*)lpfc_cmd
->dma_sgl
;
9653 fcp_cmnd
= lpfc_cmd
->fcp_cmnd
;
9655 /* Word 0-2 - FCP_CMND */
9656 wqe
->generic
.bde
.tus
.f
.bdeFlags
=
9657 BUFF_TYPE_BDE_IMMED
;
9658 wqe
->generic
.bde
.tus
.f
.bdeSize
= sgl
->sge_len
;
9659 wqe
->generic
.bde
.addrHigh
= 0;
9660 wqe
->generic
.bde
.addrLow
= 88; /* Word 22 */
9662 bf_set(wqe_wqes
, &wqe
->fcp_iread
.wqe_com
, 1);
9663 bf_set(wqe_dbde
, &wqe
->fcp_iread
.wqe_com
, 0);
9665 /* Word 22-29 FCP CMND Payload */
9666 ptr
= &wqe
->words
[22];
9667 memcpy(ptr
, fcp_cmnd
, sizeof(struct fcp_cmnd
));
9670 case CMD_FCP_ICMND64_CR
:
9671 /* word3 iocb=iotag wqe=payload_offset_len */
9672 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9673 bf_set(payload_offset_len
, &wqe
->fcp_icmd
,
9674 xmit_len
+ sizeof(struct fcp_rsp
));
9675 bf_set(cmd_buff_len
, &wqe
->fcp_icmd
,
9677 /* word3 iocb=IO_TAG wqe=reserved */
9678 bf_set(wqe_pu
, &wqe
->fcp_icmd
.wqe_com
, 0);
9679 /* Always open the exchange */
9680 bf_set(wqe_dbde
, &wqe
->fcp_icmd
.wqe_com
, 1);
9681 bf_set(wqe_iod
, &wqe
->fcp_icmd
.wqe_com
, LPFC_WQE_IOD_WRITE
);
9682 bf_set(wqe_qosd
, &wqe
->fcp_icmd
.wqe_com
, 1);
9683 bf_set(wqe_lenloc
, &wqe
->fcp_icmd
.wqe_com
,
9684 LPFC_WQE_LENLOC_NONE
);
9685 bf_set(wqe_erp
, &wqe
->fcp_icmd
.wqe_com
,
9686 iocbq
->iocb
.ulpFCP2Rcvy
);
9687 if (iocbq
->iocb_flag
& LPFC_IO_OAS
) {
9688 bf_set(wqe_oas
, &wqe
->fcp_icmd
.wqe_com
, 1);
9689 bf_set(wqe_ccpe
, &wqe
->fcp_icmd
.wqe_com
, 1);
9690 if (iocbq
->priority
) {
9691 bf_set(wqe_ccp
, &wqe
->fcp_icmd
.wqe_com
,
9692 (iocbq
->priority
<< 1));
9694 bf_set(wqe_ccp
, &wqe
->fcp_icmd
.wqe_com
,
9695 (phba
->cfg_XLanePriority
<< 1));
9698 /* Note, word 10 is already initialized to 0 */
9700 if (phba
->fcp_embed_io
) {
9701 struct lpfc_io_buf
*lpfc_cmd
;
9702 struct sli4_sge
*sgl
;
9703 struct fcp_cmnd
*fcp_cmnd
;
9706 /* 128 byte wqe support here */
9708 lpfc_cmd
= iocbq
->context1
;
9709 sgl
= (struct sli4_sge
*)lpfc_cmd
->dma_sgl
;
9710 fcp_cmnd
= lpfc_cmd
->fcp_cmnd
;
9712 /* Word 0-2 - FCP_CMND */
9713 wqe
->generic
.bde
.tus
.f
.bdeFlags
=
9714 BUFF_TYPE_BDE_IMMED
;
9715 wqe
->generic
.bde
.tus
.f
.bdeSize
= sgl
->sge_len
;
9716 wqe
->generic
.bde
.addrHigh
= 0;
9717 wqe
->generic
.bde
.addrLow
= 88; /* Word 22 */
9719 bf_set(wqe_wqes
, &wqe
->fcp_icmd
.wqe_com
, 1);
9720 bf_set(wqe_dbde
, &wqe
->fcp_icmd
.wqe_com
, 0);
9722 /* Word 22-29 FCP CMND Payload */
9723 ptr
= &wqe
->words
[22];
9724 memcpy(ptr
, fcp_cmnd
, sizeof(struct fcp_cmnd
));
9727 case CMD_GEN_REQUEST64_CR
:
9728 /* For this command calculate the xmit length of the
9732 numBdes
= iocbq
->iocb
.un
.genreq64
.bdl
.bdeSize
/
9733 sizeof(struct ulp_bde64
);
9734 for (i
= 0; i
< numBdes
; i
++) {
9735 bde
.tus
.w
= le32_to_cpu(bpl
[i
].tus
.w
);
9736 if (bde
.tus
.f
.bdeFlags
!= BUFF_TYPE_BDE_64
)
9738 xmit_len
+= bde
.tus
.f
.bdeSize
;
9740 /* word3 iocb=IO_TAG wqe=request_payload_len */
9741 wqe
->gen_req
.request_payload_len
= xmit_len
;
9742 /* word4 iocb=parameter wqe=relative_offset memcpy */
9743 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
9744 /* word6 context tag copied in memcpy */
9745 if (iocbq
->iocb
.ulpCt_h
|| iocbq
->iocb
.ulpCt_l
) {
9746 ct
= ((iocbq
->iocb
.ulpCt_h
<< 1) | iocbq
->iocb
.ulpCt_l
);
9747 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
9748 "2015 Invalid CT %x command 0x%x\n",
9749 ct
, iocbq
->iocb
.ulpCommand
);
9752 bf_set(wqe_ct
, &wqe
->gen_req
.wqe_com
, 0);
9753 bf_set(wqe_tmo
, &wqe
->gen_req
.wqe_com
, iocbq
->iocb
.ulpTimeout
);
9754 bf_set(wqe_pu
, &wqe
->gen_req
.wqe_com
, iocbq
->iocb
.ulpPU
);
9755 bf_set(wqe_dbde
, &wqe
->gen_req
.wqe_com
, 1);
9756 bf_set(wqe_iod
, &wqe
->gen_req
.wqe_com
, LPFC_WQE_IOD_READ
);
9757 bf_set(wqe_qosd
, &wqe
->gen_req
.wqe_com
, 1);
9758 bf_set(wqe_lenloc
, &wqe
->gen_req
.wqe_com
, LPFC_WQE_LENLOC_NONE
);
9759 bf_set(wqe_ebde_cnt
, &wqe
->gen_req
.wqe_com
, 0);
9760 wqe
->gen_req
.max_response_payload_len
= total_len
- xmit_len
;
9761 command_type
= OTHER_COMMAND
;
9763 case CMD_XMIT_ELS_RSP64_CX
:
9764 ndlp
= (struct lpfc_nodelist
*)iocbq
->context1
;
9765 /* words0-2 BDE memcpy */
9766 /* word3 iocb=iotag32 wqe=response_payload_len */
9767 wqe
->xmit_els_rsp
.response_payload_len
= xmit_len
;
9769 wqe
->xmit_els_rsp
.word4
= 0;
9770 /* word5 iocb=rsvd wge=did */
9771 bf_set(wqe_els_did
, &wqe
->xmit_els_rsp
.wqe_dest
,
9772 iocbq
->iocb
.un
.xseq64
.xmit_els_remoteID
);
9774 if_type
= bf_get(lpfc_sli_intf_if_type
,
9775 &phba
->sli4_hba
.sli_intf
);
9776 if (if_type
>= LPFC_SLI_INTF_IF_TYPE_2
) {
9777 if (iocbq
->vport
->fc_flag
& FC_PT2PT
) {
9778 bf_set(els_rsp64_sp
, &wqe
->xmit_els_rsp
, 1);
9779 bf_set(els_rsp64_sid
, &wqe
->xmit_els_rsp
,
9780 iocbq
->vport
->fc_myDID
);
9781 if (iocbq
->vport
->fc_myDID
== Fabric_DID
) {
9783 &wqe
->xmit_els_rsp
.wqe_dest
, 0);
9787 bf_set(wqe_ct
, &wqe
->xmit_els_rsp
.wqe_com
,
9788 ((iocbq
->iocb
.ulpCt_h
<< 1) | iocbq
->iocb
.ulpCt_l
));
9789 bf_set(wqe_pu
, &wqe
->xmit_els_rsp
.wqe_com
, iocbq
->iocb
.ulpPU
);
9790 bf_set(wqe_rcvoxid
, &wqe
->xmit_els_rsp
.wqe_com
,
9791 iocbq
->iocb
.unsli3
.rcvsli3
.ox_id
);
9792 if (!iocbq
->iocb
.ulpCt_h
&& iocbq
->iocb
.ulpCt_l
)
9793 bf_set(wqe_ctxt_tag
, &wqe
->xmit_els_rsp
.wqe_com
,
9794 phba
->vpi_ids
[iocbq
->vport
->vpi
]);
9795 bf_set(wqe_dbde
, &wqe
->xmit_els_rsp
.wqe_com
, 1);
9796 bf_set(wqe_iod
, &wqe
->xmit_els_rsp
.wqe_com
, LPFC_WQE_IOD_WRITE
);
9797 bf_set(wqe_qosd
, &wqe
->xmit_els_rsp
.wqe_com
, 1);
9798 bf_set(wqe_lenloc
, &wqe
->xmit_els_rsp
.wqe_com
,
9799 LPFC_WQE_LENLOC_WORD3
);
9800 bf_set(wqe_ebde_cnt
, &wqe
->xmit_els_rsp
.wqe_com
, 0);
9801 bf_set(wqe_rsp_temp_rpi
, &wqe
->xmit_els_rsp
,
9802 phba
->sli4_hba
.rpi_ids
[ndlp
->nlp_rpi
]);
9803 pcmd
= (uint32_t *) (((struct lpfc_dmabuf
*)
9804 iocbq
->context2
)->virt
);
9805 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
9806 bf_set(els_rsp64_sp
, &wqe
->xmit_els_rsp
, 1);
9807 bf_set(els_rsp64_sid
, &wqe
->xmit_els_rsp
,
9808 iocbq
->vport
->fc_myDID
);
9809 bf_set(wqe_ct
, &wqe
->xmit_els_rsp
.wqe_com
, 1);
9810 bf_set(wqe_ctxt_tag
, &wqe
->xmit_els_rsp
.wqe_com
,
9811 phba
->vpi_ids
[phba
->pport
->vpi
]);
9813 command_type
= OTHER_COMMAND
;
9815 case CMD_CLOSE_XRI_CN
:
9816 case CMD_ABORT_XRI_CN
:
9817 case CMD_ABORT_XRI_CX
:
9818 /* words 0-2 memcpy should be 0 rserved */
9819 /* port will send abts */
9820 abrt_iotag
= iocbq
->iocb
.un
.acxri
.abortContextTag
;
9821 if (abrt_iotag
!= 0 && abrt_iotag
<= phba
->sli
.last_iotag
) {
9822 abrtiocbq
= phba
->sli
.iocbq_lookup
[abrt_iotag
];
9823 fip
= abrtiocbq
->iocb_flag
& LPFC_FIP_ELS_ID_MASK
;
9827 if ((iocbq
->iocb
.ulpCommand
== CMD_CLOSE_XRI_CN
) || fip
)
9829 * The link is down, or the command was ELS_FIP
9830 * so the fw does not need to send abts
9833 bf_set(abort_cmd_ia
, &wqe
->abort_cmd
, 1);
9835 bf_set(abort_cmd_ia
, &wqe
->abort_cmd
, 0);
9836 bf_set(abort_cmd_criteria
, &wqe
->abort_cmd
, T_XRI_TAG
);
9837 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
9838 wqe
->abort_cmd
.rsrvd5
= 0;
9839 bf_set(wqe_ct
, &wqe
->abort_cmd
.wqe_com
,
9840 ((iocbq
->iocb
.ulpCt_h
<< 1) | iocbq
->iocb
.ulpCt_l
));
9841 abort_tag
= iocbq
->iocb
.un
.acxri
.abortIoTag
;
9843 * The abort handler will send us CMD_ABORT_XRI_CN or
9844 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
9846 bf_set(wqe_cmnd
, &wqe
->abort_cmd
.wqe_com
, CMD_ABORT_XRI_CX
);
9847 bf_set(wqe_qosd
, &wqe
->abort_cmd
.wqe_com
, 1);
9848 bf_set(wqe_lenloc
, &wqe
->abort_cmd
.wqe_com
,
9849 LPFC_WQE_LENLOC_NONE
);
9850 cmnd
= CMD_ABORT_XRI_CX
;
9851 command_type
= OTHER_COMMAND
;
9854 case CMD_XMIT_BLS_RSP64_CX
:
9855 ndlp
= (struct lpfc_nodelist
*)iocbq
->context1
;
9856 /* As BLS ABTS RSP WQE is very different from other WQEs,
9857 * we re-construct this WQE here based on information in
9858 * iocbq from scratch.
9860 memset(wqe
, 0, sizeof(*wqe
));
9861 /* OX_ID is invariable to who sent ABTS to CT exchange */
9862 bf_set(xmit_bls_rsp64_oxid
, &wqe
->xmit_bls_rsp
,
9863 bf_get(lpfc_abts_oxid
, &iocbq
->iocb
.un
.bls_rsp
));
9864 if (bf_get(lpfc_abts_orig
, &iocbq
->iocb
.un
.bls_rsp
) ==
9865 LPFC_ABTS_UNSOL_INT
) {
9866 /* ABTS sent by initiator to CT exchange, the
9867 * RX_ID field will be filled with the newly
9868 * allocated responder XRI.
9870 bf_set(xmit_bls_rsp64_rxid
, &wqe
->xmit_bls_rsp
,
9871 iocbq
->sli4_xritag
);
9873 /* ABTS sent by responder to CT exchange, the
9874 * RX_ID field will be filled with the responder
9877 bf_set(xmit_bls_rsp64_rxid
, &wqe
->xmit_bls_rsp
,
9878 bf_get(lpfc_abts_rxid
, &iocbq
->iocb
.un
.bls_rsp
));
9880 bf_set(xmit_bls_rsp64_seqcnthi
, &wqe
->xmit_bls_rsp
, 0xffff);
9881 bf_set(wqe_xmit_bls_pt
, &wqe
->xmit_bls_rsp
.wqe_dest
, 0x1);
9884 bf_set(wqe_els_did
, &wqe
->xmit_bls_rsp
.wqe_dest
,
9886 bf_set(xmit_bls_rsp64_temprpi
, &wqe
->xmit_bls_rsp
,
9887 iocbq
->iocb
.ulpContext
);
9888 bf_set(wqe_ct
, &wqe
->xmit_bls_rsp
.wqe_com
, 1);
9889 bf_set(wqe_ctxt_tag
, &wqe
->xmit_bls_rsp
.wqe_com
,
9890 phba
->vpi_ids
[phba
->pport
->vpi
]);
9891 bf_set(wqe_qosd
, &wqe
->xmit_bls_rsp
.wqe_com
, 1);
9892 bf_set(wqe_lenloc
, &wqe
->xmit_bls_rsp
.wqe_com
,
9893 LPFC_WQE_LENLOC_NONE
);
9894 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
9895 command_type
= OTHER_COMMAND
;
9896 if (iocbq
->iocb
.un
.xseq64
.w5
.hcsw
.Rctl
== FC_RCTL_BA_RJT
) {
9897 bf_set(xmit_bls_rsp64_rjt_vspec
, &wqe
->xmit_bls_rsp
,
9898 bf_get(lpfc_vndr_code
, &iocbq
->iocb
.un
.bls_rsp
));
9899 bf_set(xmit_bls_rsp64_rjt_expc
, &wqe
->xmit_bls_rsp
,
9900 bf_get(lpfc_rsn_expln
, &iocbq
->iocb
.un
.bls_rsp
));
9901 bf_set(xmit_bls_rsp64_rjt_rsnc
, &wqe
->xmit_bls_rsp
,
9902 bf_get(lpfc_rsn_code
, &iocbq
->iocb
.un
.bls_rsp
));
9906 case CMD_SEND_FRAME
:
9907 bf_set(wqe_cmnd
, &wqe
->generic
.wqe_com
, CMD_SEND_FRAME
);
9908 bf_set(wqe_sof
, &wqe
->generic
.wqe_com
, 0x2E); /* SOF byte */
9909 bf_set(wqe_eof
, &wqe
->generic
.wqe_com
, 0x41); /* EOF byte */
9910 bf_set(wqe_lenloc
, &wqe
->generic
.wqe_com
, 1);
9911 bf_set(wqe_xbl
, &wqe
->generic
.wqe_com
, 1);
9912 bf_set(wqe_dbde
, &wqe
->generic
.wqe_com
, 1);
9913 bf_set(wqe_xc
, &wqe
->generic
.wqe_com
, 1);
9914 bf_set(wqe_cmd_type
, &wqe
->generic
.wqe_com
, 0xA);
9915 bf_set(wqe_cqid
, &wqe
->generic
.wqe_com
, LPFC_WQE_CQ_ID_DEFAULT
);
9916 bf_set(wqe_xri_tag
, &wqe
->generic
.wqe_com
, xritag
);
9917 bf_set(wqe_reqtag
, &wqe
->generic
.wqe_com
, iocbq
->iotag
);
9919 case CMD_XRI_ABORTED_CX
:
9920 case CMD_CREATE_XRI_CR
: /* Do we expect to use this? */
9921 case CMD_IOCB_FCP_IBIDIR64_CR
: /* bidirectional xfer */
9922 case CMD_FCP_TSEND64_CX
: /* Target mode send xfer-ready */
9923 case CMD_FCP_TRSP64_CX
: /* Target mode rcv */
9924 case CMD_FCP_AUTO_TRSP_CX
: /* Auto target rsp */
9926 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
9927 "2014 Invalid command 0x%x\n",
9928 iocbq
->iocb
.ulpCommand
);
9933 if (iocbq
->iocb_flag
& LPFC_IO_DIF_PASS
)
9934 bf_set(wqe_dif
, &wqe
->generic
.wqe_com
, LPFC_WQE_DIF_PASSTHRU
);
9935 else if (iocbq
->iocb_flag
& LPFC_IO_DIF_STRIP
)
9936 bf_set(wqe_dif
, &wqe
->generic
.wqe_com
, LPFC_WQE_DIF_STRIP
);
9937 else if (iocbq
->iocb_flag
& LPFC_IO_DIF_INSERT
)
9938 bf_set(wqe_dif
, &wqe
->generic
.wqe_com
, LPFC_WQE_DIF_INSERT
);
9939 iocbq
->iocb_flag
&= ~(LPFC_IO_DIF_PASS
| LPFC_IO_DIF_STRIP
|
9940 LPFC_IO_DIF_INSERT
);
9941 bf_set(wqe_xri_tag
, &wqe
->generic
.wqe_com
, xritag
);
9942 bf_set(wqe_reqtag
, &wqe
->generic
.wqe_com
, iocbq
->iotag
);
9943 wqe
->generic
.wqe_com
.abort_tag
= abort_tag
;
9944 bf_set(wqe_cmd_type
, &wqe
->generic
.wqe_com
, command_type
);
9945 bf_set(wqe_cmnd
, &wqe
->generic
.wqe_com
, cmnd
);
9946 bf_set(wqe_class
, &wqe
->generic
.wqe_com
, iocbq
->iocb
.ulpClass
);
9947 bf_set(wqe_cqid
, &wqe
->generic
.wqe_com
, LPFC_WQE_CQ_ID_DEFAULT
);
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with ringlock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to the
 * txq.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;

	/* Get the WQ */
	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes,
	 */

	lockdep_assert_held(&pring->ring_lock);

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
							pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
				if (!sglq) {
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP)
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	else {
		/*
		 * This is a continuation of a command (CX), so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if (lpfc_sli4_wq_put(wq, &wqe))
		return IOCB_ERROR;
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version for issuing an IOCB, using
 * the function pointer from the lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}
/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
	return 0;
}
/**
 * lpfc_sli4_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's hba_wqidx.
 */
struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	struct lpfc_io_buf *lpfc_cmd;

	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (unlikely(!phba->sli4_hba.hdwq))
			return NULL;
		/*
		 * for abort iocb hba_wqidx should already
		 * be setup based on what work queue we used.
		 */
		if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
			lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
		}
		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return NULL;
		piocb->hba_wqidx = 0;
		return phba->sli4_hba.els_wq->pring;
	}
}
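/*
 * Editorial sketch (not part of the driver): for an FCP command the ring is
 * derived from the hardware queue the I/O buffer was allocated against, so
 * the lookup above is effectively
 *
 *	piocb->hba_wqidx = lpfc_cmd->hdwq_no;
 *	pring = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
 *
 * while aborts reuse the hba_wqidx already stamped on the original command,
 * which keeps an abort WQE on the same work queue as the WQE it aborts.
 */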
/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
 * function. This function gets the appropriate lock and calls
 * __lpfc_sli_issue_iocb function and will return the error returned
 * by __lpfc_sli_issue_iocb function. This wrapper is used by
 * functions which do not hold hbalock.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *eq;
	unsigned long iflags;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;

		pring = lpfc_sli4_calc_ring(phba, piocb);
		if (unlikely(pring == NULL))
			return IOCB_ERROR;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
	} else {
		/* For now, SLI2/3 will still use hbalock */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}
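/*
 * Illustrative caller pattern (editorial only; the ring number and flag
 * values below are just the common ones used elsewhere in the driver):
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR) {
 *		lpfc_els_free_iocb(phba, elsiocb);
 *		return 1;
 *	}
 *
 * When SLI_IOCB_RET_IOCB is passed instead of 0, an iocb that cannot be
 * submitted is handed back with IOCB_BUSY rather than being parked on the
 * txq.
 */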
/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode functionality
 * or IP over FC functionality.
 *
 * This function is called with no lock held. SLI3 only.
 **/
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->sli3_ring[LPFC_FCP_RING];
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->sli3_ring[LPFC_EXTRA_RING];

	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;      /* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}
/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port.  The port generates
 * this event when an Abort Sequence request to an rport fails
 * twice in succession.  The abort could be originated by the
 * driver or by the port.  The ABTS could have been for an ELS
 * or FCP IO.  The port only generates this event when an ABTS
 * fails to complete after one retry.
 */
static void
lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
			  struct lpfc_iocbq *iocbq)
{
	struct lpfc_nodelist *ndlp = NULL;
	uint16_t rpi = 0, vpi = 0;
	struct lpfc_vport *vport = NULL;

	/* The rpi in the ulpContext is vport-sensitive. */
	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
	rpi = iocbq->iocb.ulpContext;

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3092 Port generated ABTS async event "
			"on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);

	vport = lpfc_find_vport_by_vpid(phba, vpi);
	if (!vport)
		goto err_exit;
	ndlp = lpfc_findnode_rpi(vport, rpi);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto err_exit;

	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
		lpfc_sli_abts_recover_port(vport, ndlp);
	return;

 err_exit:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3095 Event Context not found, no "
			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
			iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
			vpi, rpi);
}
/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
 * port.  The port generates this event when an abort exchange request to an
 * rport fails twice in succession with no reply.  The abort could be originated
 * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
 */
void
lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
			   struct lpfc_nodelist *ndlp,
			   struct sli4_wcqe_xri_aborted *axri)
{
	struct lpfc_vport *vport;
	uint32_t ext_status = 0;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3115 Node Context not found, driver "
				"ignoring abts err event\n");
		return;
	}

	vport = ndlp->vport;
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3116 Port generated FCP XRI ABORT event on "
			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
			bf_get(lpfc_wcqe_xa_xri, axri),
			bf_get(lpfc_wcqe_xa_status, axri),
			axri->parameter);

	/*
	 * Catch the ABTS protocol failure case.  Older OCe FW releases returned
	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
	 */
	ext_status = axri->parameter & IOERR_PARAM_MASK;
	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
		lpfc_sli_abts_recover_port(vport, ndlp);
}
10316 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10317 * @phba: Pointer to HBA context object.
10318 * @pring: Pointer to driver SLI ring object.
10319 * @iocbq: Pointer to iocb object.
10321 * This function is called by the slow ring event handler
10322 * function when there is an ASYNC event iocb in the ring.
10323 * This function is called with no lock held.
10324 * Currently this function handles only temperature related
10325 * ASYNC events. The function decodes the temperature sensor
10326 * event message and posts events for the management applications.
10329 lpfc_sli_async_event_handler(struct lpfc_hba
* phba
,
10330 struct lpfc_sli_ring
* pring
, struct lpfc_iocbq
* iocbq
)
10334 struct temp_event temp_event_data
;
10335 struct Scsi_Host
*shost
;
10338 icmd
= &iocbq
->iocb
;
10339 evt_code
= icmd
->un
.asyncstat
.evt_code
;
10341 switch (evt_code
) {
10342 case ASYNC_TEMP_WARN
:
10343 case ASYNC_TEMP_SAFE
:
10344 temp_event_data
.data
= (uint32_t) icmd
->ulpContext
;
10345 temp_event_data
.event_type
= FC_REG_TEMPERATURE_EVENT
;
10346 if (evt_code
== ASYNC_TEMP_WARN
) {
10347 temp_event_data
.event_code
= LPFC_THRESHOLD_TEMP
;
10348 lpfc_printf_log(phba
, KERN_ERR
, LOG_TEMP
,
10349 "0347 Adapter is very hot, please take "
10350 "corrective action. temperature : %d Celsius\n",
10351 (uint32_t) icmd
->ulpContext
);
10353 temp_event_data
.event_code
= LPFC_NORMAL_TEMP
;
10354 lpfc_printf_log(phba
, KERN_ERR
, LOG_TEMP
,
10355 "0340 Adapter temperature is OK now. "
10356 "temperature : %d Celsius\n",
10357 (uint32_t) icmd
->ulpContext
);
10360 /* Send temperature change event to applications */
10361 shost
= lpfc_shost_from_vport(phba
->pport
);
10362 fc_host_post_vendor_event(shost
, fc_get_event_number(),
10363 sizeof(temp_event_data
), (char *) &temp_event_data
,
10364 LPFC_NL_VENDOR_ID
);
10366 case ASYNC_STATUS_CN
:
10367 lpfc_sli_abts_err_handler(phba
, iocbq
);
10370 iocb_w
= (uint32_t *) icmd
;
10371 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
10372 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10374 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10375 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10376 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10377 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10378 pring
->ringno
, icmd
->un
.asyncstat
.evt_code
,
10379 iocb_w
[0], iocb_w
[1], iocb_w
[2], iocb_w
[3],
10380 iocb_w
[4], iocb_w
[5], iocb_w
[6], iocb_w
[7],
10381 iocb_w
[8], iocb_w
[9], iocb_w
[10], iocb_w
[11],
10382 iocb_w
[12], iocb_w
[13], iocb_w
[14], iocb_w
[15]);
10390 * lpfc_sli4_setup - SLI ring setup function
10391 * @phba: Pointer to HBA context object.
10393 * lpfc_sli_setup sets up rings of the SLI interface with
10394 * number of iocbs per ring and iotags. This function is
10395 * called while driver attach to the HBA and before the
10396 * interrupts are enabled. So there is no need for locking.
10398 * This function always returns 0.
10401 lpfc_sli4_setup(struct lpfc_hba
*phba
)
10403 struct lpfc_sli_ring
*pring
;
10405 pring
= phba
->sli4_hba
.els_wq
->pring
;
10406 pring
->num_mask
= LPFC_MAX_RING_MASK
;
10407 pring
->prt
[0].profile
= 0; /* Mask 0 */
10408 pring
->prt
[0].rctl
= FC_RCTL_ELS_REQ
;
10409 pring
->prt
[0].type
= FC_TYPE_ELS
;
10410 pring
->prt
[0].lpfc_sli_rcv_unsol_event
=
10411 lpfc_els_unsol_event
;
10412 pring
->prt
[1].profile
= 0; /* Mask 1 */
10413 pring
->prt
[1].rctl
= FC_RCTL_ELS_REP
;
10414 pring
->prt
[1].type
= FC_TYPE_ELS
;
10415 pring
->prt
[1].lpfc_sli_rcv_unsol_event
=
10416 lpfc_els_unsol_event
;
10417 pring
->prt
[2].profile
= 0; /* Mask 2 */
10418 /* NameServer Inquiry */
10419 pring
->prt
[2].rctl
= FC_RCTL_DD_UNSOL_CTL
;
10421 pring
->prt
[2].type
= FC_TYPE_CT
;
10422 pring
->prt
[2].lpfc_sli_rcv_unsol_event
=
10423 lpfc_ct_unsol_event
;
10424 pring
->prt
[3].profile
= 0; /* Mask 3 */
10425 /* NameServer response */
10426 pring
->prt
[3].rctl
= FC_RCTL_DD_SOL_CTL
;
10428 pring
->prt
[3].type
= FC_TYPE_CT
;
10429 pring
->prt
[3].lpfc_sli_rcv_unsol_event
=
10430 lpfc_ct_unsol_event
;
10435 * lpfc_sli_setup - SLI ring setup function
10436 * @phba: Pointer to HBA context object.
10438 * lpfc_sli_setup sets up rings of the SLI interface with
10439 * number of iocbs per ring and iotags. This function is
10440 * called while driver attach to the HBA and before the
10441 * interrupts are enabled. So there is no need for locking.
10443 * This function always returns 0. SLI3 only.
10446 lpfc_sli_setup(struct lpfc_hba
*phba
)
10448 int i
, totiocbsize
= 0;
10449 struct lpfc_sli
*psli
= &phba
->sli
;
10450 struct lpfc_sli_ring
*pring
;
10452 psli
->num_rings
= MAX_SLI3_CONFIGURED_RINGS
;
10453 psli
->sli_flag
= 0;
10455 psli
->iocbq_lookup
= NULL
;
10456 psli
->iocbq_lookup_len
= 0;
10457 psli
->last_iotag
= 0;
10459 for (i
= 0; i
< psli
->num_rings
; i
++) {
10460 pring
= &psli
->sli3_ring
[i
];
10462 case LPFC_FCP_RING
: /* ring 0 - FCP */
10463 /* numCiocb and numRiocb are used in config_port */
10464 pring
->sli
.sli3
.numCiocb
= SLI2_IOCB_CMD_R0_ENTRIES
;
10465 pring
->sli
.sli3
.numRiocb
= SLI2_IOCB_RSP_R0_ENTRIES
;
10466 pring
->sli
.sli3
.numCiocb
+=
10467 SLI2_IOCB_CMD_R1XTRA_ENTRIES
;
10468 pring
->sli
.sli3
.numRiocb
+=
10469 SLI2_IOCB_RSP_R1XTRA_ENTRIES
;
10470 pring
->sli
.sli3
.numCiocb
+=
10471 SLI2_IOCB_CMD_R3XTRA_ENTRIES
;
10472 pring
->sli
.sli3
.numRiocb
+=
10473 SLI2_IOCB_RSP_R3XTRA_ENTRIES
;
10474 pring
->sli
.sli3
.sizeCiocb
= (phba
->sli_rev
== 3) ?
10475 SLI3_IOCB_CMD_SIZE
:
10476 SLI2_IOCB_CMD_SIZE
;
10477 pring
->sli
.sli3
.sizeRiocb
= (phba
->sli_rev
== 3) ?
10478 SLI3_IOCB_RSP_SIZE
:
10479 SLI2_IOCB_RSP_SIZE
;
10480 pring
->iotag_ctr
= 0;
10482 (phba
->cfg_hba_queue_depth
* 2);
10483 pring
->fast_iotag
= pring
->iotag_max
;
10484 pring
->num_mask
= 0;
10486 case LPFC_EXTRA_RING
: /* ring 1 - EXTRA */
10487 /* numCiocb and numRiocb are used in config_port */
10488 pring
->sli
.sli3
.numCiocb
= SLI2_IOCB_CMD_R1_ENTRIES
;
10489 pring
->sli
.sli3
.numRiocb
= SLI2_IOCB_RSP_R1_ENTRIES
;
10490 pring
->sli
.sli3
.sizeCiocb
= (phba
->sli_rev
== 3) ?
10491 SLI3_IOCB_CMD_SIZE
:
10492 SLI2_IOCB_CMD_SIZE
;
10493 pring
->sli
.sli3
.sizeRiocb
= (phba
->sli_rev
== 3) ?
10494 SLI3_IOCB_RSP_SIZE
:
10495 SLI2_IOCB_RSP_SIZE
;
10496 pring
->iotag_max
= phba
->cfg_hba_queue_depth
;
10497 pring
->num_mask
= 0;
10499 case LPFC_ELS_RING
: /* ring 2 - ELS / CT */
10500 /* numCiocb and numRiocb are used in config_port */
10501 pring
->sli
.sli3
.numCiocb
= SLI2_IOCB_CMD_R2_ENTRIES
;
10502 pring
->sli
.sli3
.numRiocb
= SLI2_IOCB_RSP_R2_ENTRIES
;
10503 pring
->sli
.sli3
.sizeCiocb
= (phba
->sli_rev
== 3) ?
10504 SLI3_IOCB_CMD_SIZE
:
10505 SLI2_IOCB_CMD_SIZE
;
10506 pring
->sli
.sli3
.sizeRiocb
= (phba
->sli_rev
== 3) ?
10507 SLI3_IOCB_RSP_SIZE
:
10508 SLI2_IOCB_RSP_SIZE
;
10509 pring
->fast_iotag
= 0;
10510 pring
->iotag_ctr
= 0;
10511 pring
->iotag_max
= 4096;
10512 pring
->lpfc_sli_rcv_async_status
=
10513 lpfc_sli_async_event_handler
;
10514 pring
->num_mask
= LPFC_MAX_RING_MASK
;
10515 pring
->prt
[0].profile
= 0; /* Mask 0 */
10516 pring
->prt
[0].rctl
= FC_RCTL_ELS_REQ
;
10517 pring
->prt
[0].type
= FC_TYPE_ELS
;
10518 pring
->prt
[0].lpfc_sli_rcv_unsol_event
=
10519 lpfc_els_unsol_event
;
10520 pring
->prt
[1].profile
= 0; /* Mask 1 */
10521 pring
->prt
[1].rctl
= FC_RCTL_ELS_REP
;
10522 pring
->prt
[1].type
= FC_TYPE_ELS
;
10523 pring
->prt
[1].lpfc_sli_rcv_unsol_event
=
10524 lpfc_els_unsol_event
;
10525 pring
->prt
[2].profile
= 0; /* Mask 2 */
10526 /* NameServer Inquiry */
10527 pring
->prt
[2].rctl
= FC_RCTL_DD_UNSOL_CTL
;
10529 pring
->prt
[2].type
= FC_TYPE_CT
;
10530 pring
->prt
[2].lpfc_sli_rcv_unsol_event
=
10531 lpfc_ct_unsol_event
;
10532 pring
->prt
[3].profile
= 0; /* Mask 3 */
10533 /* NameServer response */
10534 pring
->prt
[3].rctl
= FC_RCTL_DD_SOL_CTL
;
10536 pring
->prt
[3].type
= FC_TYPE_CT
;
10537 pring
->prt
[3].lpfc_sli_rcv_unsol_event
=
10538 lpfc_ct_unsol_event
;
10541 totiocbsize
+= (pring
->sli
.sli3
.numCiocb
*
10542 pring
->sli
.sli3
.sizeCiocb
) +
10543 (pring
->sli
.sli3
.numRiocb
* pring
->sli
.sli3
.sizeRiocb
);
10545 if (totiocbsize
> MAX_SLIM_IOCB_SIZE
) {
10546 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10547 printk(KERN_ERR
"%d:0462 Too many cmd / rsp ring entries in "
10548 "SLI2 SLIM Data: x%x x%lx\n",
10549 phba
->brd_no
, totiocbsize
,
10550 (unsigned long) MAX_SLIM_IOCB_SIZE
);
10552 if (phba
->cfg_multi_ring_support
== 2)
10553 lpfc_extra_ring_setup(phba
);
/**
 * lpfc_sli4_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli4_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_FCP_RING;
		pring->txcmplq_cnt = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}
	pring = phba->sli4_hba.els_wq->pring;
	pring->flag = 0;
	pring->ringno = LPFC_ELS_RING;
	pring->txcmplq_cnt = 0;
	INIT_LIST_HEAD(&pring->txq);
	INIT_LIST_HEAD(&pring->txcmplq);
	INIT_LIST_HEAD(&pring->iocb_continueq);
	spin_lock_init(&pring->ring_lock);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		pring->flag = 0;
		pring->ringno = LPFC_ELS_RING;
		pring->txcmplq_cnt = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		spin_lock_init(&pring->ring_lock);
	}

	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli_queue_init - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held.
 **/
void
lpfc_sli_queue_init(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->sli3_ring[i];
		pring->ringno = i;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
		pring->flag = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		spin_lock_init(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: the pending mailbox command queue; the outstanding mailbox
 * command; and the completed mailbox command queue. It is the caller's
 * responsibility to make sure that the driver is in the proper state to flush
 * the mailbox command sub-system. Namely, the posting of mailbox commands into
 * the pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state that it will never work on the outstanding
 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
 * mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);

	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Enable softirqs again, done with phba->hbalock */
	local_bh_enable();

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
10708 * lpfc_sli_host_down - Vport cleanup function
10709 * @vport: Pointer to virtual port object.
10711 * lpfc_sli_host_down is called to clean up the resources
10712 * associated with a vport before destroying virtual
10713 * port data structures.
10714 * This function does following operations:
10715 * - Free discovery resources associated with this virtual
10717 * - Free iocbs associated with this virtual port in
10719 * - Send abort for all iocb commands associated with this
10720 * vport in txcmplq.
10722 * This function is called with no lock held and always returns 1.
10725 lpfc_sli_host_down(struct lpfc_vport
*vport
)
10727 LIST_HEAD(completions
);
10728 struct lpfc_hba
*phba
= vport
->phba
;
10729 struct lpfc_sli
*psli
= &phba
->sli
;
10730 struct lpfc_queue
*qp
= NULL
;
10731 struct lpfc_sli_ring
*pring
;
10732 struct lpfc_iocbq
*iocb
, *next_iocb
;
10734 unsigned long flags
= 0;
10735 uint16_t prev_pring_flag
;
10737 lpfc_cleanup_discovery_resources(vport
);
10739 spin_lock_irqsave(&phba
->hbalock
, flags
);
10742 * Error everything on the txq since these iocbs
10743 * have not been given to the FW yet.
10744 * Also issue ABTS for everything on the txcmplq
10746 if (phba
->sli_rev
!= LPFC_SLI_REV4
) {
10747 for (i
= 0; i
< psli
->num_rings
; i
++) {
10748 pring
= &psli
->sli3_ring
[i
];
10749 prev_pring_flag
= pring
->flag
;
10750 /* Only slow rings */
10751 if (pring
->ringno
== LPFC_ELS_RING
) {
10752 pring
->flag
|= LPFC_DEFERRED_RING_EVENT
;
10753 /* Set the lpfc data pending flag */
10754 set_bit(LPFC_DATA_READY
, &phba
->data_flags
);
10756 list_for_each_entry_safe(iocb
, next_iocb
,
10757 &pring
->txq
, list
) {
10758 if (iocb
->vport
!= vport
)
10760 list_move_tail(&iocb
->list
, &completions
);
10762 list_for_each_entry_safe(iocb
, next_iocb
,
10763 &pring
->txcmplq
, list
) {
10764 if (iocb
->vport
!= vport
)
10766 lpfc_sli_issue_abort_iotag(phba
, pring
, iocb
);
10768 pring
->flag
= prev_pring_flag
;
10771 list_for_each_entry(qp
, &phba
->sli4_hba
.lpfc_wq_list
, wq_list
) {
10775 if (pring
== phba
->sli4_hba
.els_wq
->pring
) {
10776 pring
->flag
|= LPFC_DEFERRED_RING_EVENT
;
10777 /* Set the lpfc data pending flag */
10778 set_bit(LPFC_DATA_READY
, &phba
->data_flags
);
10780 prev_pring_flag
= pring
->flag
;
10781 spin_lock(&pring
->ring_lock
);
10782 list_for_each_entry_safe(iocb
, next_iocb
,
10783 &pring
->txq
, list
) {
10784 if (iocb
->vport
!= vport
)
10786 list_move_tail(&iocb
->list
, &completions
);
10788 spin_unlock(&pring
->ring_lock
);
10789 list_for_each_entry_safe(iocb
, next_iocb
,
10790 &pring
->txcmplq
, list
) {
10791 if (iocb
->vport
!= vport
)
10793 lpfc_sli_issue_abort_iotag(phba
, pring
, iocb
);
10795 pring
->flag
= prev_pring_flag
;
10798 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
10800 /* Cancel all the IOCBs from the completions list */
10801 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
10807 * lpfc_sli_hba_down - Resource cleanup function for the HBA
10808 * @phba: Pointer to HBA context object.
10810 * This function cleans up all iocb, buffers, mailbox commands
10811 * while shutting down the HBA. This function is called with no
10812 * lock held and always returns 1.
10813 * This function does the following to cleanup driver resources:
10814 * - Free discovery resources for each virtual port
10815 * - Cleanup any pending fabric iocbs
10816 * - Iterate through the iocb txq and free each entry
10818 * - Free up any buffer posted to the HBA
10819 * - Free mailbox commands in the mailbox queue.
10822 lpfc_sli_hba_down(struct lpfc_hba
*phba
)
10824 LIST_HEAD(completions
);
10825 struct lpfc_sli
*psli
= &phba
->sli
;
10826 struct lpfc_queue
*qp
= NULL
;
10827 struct lpfc_sli_ring
*pring
;
10828 struct lpfc_dmabuf
*buf_ptr
;
10829 unsigned long flags
= 0;
10832 /* Shutdown the mailbox command sub-system */
10833 lpfc_sli_mbox_sys_shutdown(phba
, LPFC_MBX_WAIT
);
10835 lpfc_hba_down_prep(phba
);
10837 /* Disable softirqs, including timers from obtaining phba->hbalock */
10838 local_bh_disable();
10840 lpfc_fabric_abort_hba(phba
);
10842 spin_lock_irqsave(&phba
->hbalock
, flags
);
10845 * Error everything on the txq since these iocbs
10846 * have not been given to the FW yet.
10848 if (phba
->sli_rev
!= LPFC_SLI_REV4
) {
10849 for (i
= 0; i
< psli
->num_rings
; i
++) {
10850 pring
= &psli
->sli3_ring
[i
];
10851 /* Only slow rings */
10852 if (pring
->ringno
== LPFC_ELS_RING
) {
10853 pring
->flag
|= LPFC_DEFERRED_RING_EVENT
;
10854 /* Set the lpfc data pending flag */
10855 set_bit(LPFC_DATA_READY
, &phba
->data_flags
);
10857 list_splice_init(&pring
->txq
, &completions
);
10860 list_for_each_entry(qp
, &phba
->sli4_hba
.lpfc_wq_list
, wq_list
) {
10864 spin_lock(&pring
->ring_lock
);
10865 list_splice_init(&pring
->txq
, &completions
);
10866 spin_unlock(&pring
->ring_lock
);
10867 if (pring
== phba
->sli4_hba
.els_wq
->pring
) {
10868 pring
->flag
|= LPFC_DEFERRED_RING_EVENT
;
10869 /* Set the lpfc data pending flag */
10870 set_bit(LPFC_DATA_READY
, &phba
->data_flags
);
10874 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
10876 /* Cancel all the IOCBs from the completions list */
10877 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
10880 spin_lock_irqsave(&phba
->hbalock
, flags
);
10881 list_splice_init(&phba
->elsbuf
, &completions
);
10882 phba
->elsbuf_cnt
= 0;
10883 phba
->elsbuf_prev_cnt
= 0;
10884 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
10886 while (!list_empty(&completions
)) {
10887 list_remove_head(&completions
, buf_ptr
,
10888 struct lpfc_dmabuf
, list
);
10889 lpfc_mbuf_free(phba
, buf_ptr
->virt
, buf_ptr
->phys
);
10893 /* Enable softirqs again, done with phba->hbalock */
10896 /* Return any active mbox cmds */
10897 del_timer_sync(&psli
->mbox_tmo
);
10899 spin_lock_irqsave(&phba
->pport
->work_port_lock
, flags
);
10900 phba
->pport
->work_port_events
&= ~WORKER_MBOX_TMO
;
10901 spin_unlock_irqrestore(&phba
->pport
->work_port_lock
, flags
);
/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy, processed one 32-bit word at a time.
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
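/*
 * Worked example (editorial): with cnt = 8 the loop above runs twice
 * (i = 0 and i = 4).  On a big-endian host, a source word that holds the
 * little-endian encoding 0x78563412 is stored to the destination as
 * 0x12345678 by le32_to_cpu(); on a little-endian host the call is a no-op
 * and the copy is byte-for-byte.
 */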
/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy, processed one 32-bit word at a time.
 *
 * This function is used for copying data from a data structure
 * with big endian representation to local endianness.
 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using a CMD_QUE_XRI64_CX iocb. The
 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no lock held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
	 * a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}
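/*
 * Editorial note: QUE_BUFTAG_BIT keeps the driver-assigned tags in a range
 * that unsolicited-event (HBQ) tags do not use.  For example, assuming the
 * counter sits at 5 on entry, the returned tag is (QUE_BUFTAG_BIT | 6); the
 * lookup in lpfc_sli_ring_taggedbuf_get() then matches on that full value,
 * so a plain HBQ tag of 6 cannot alias it.
 */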
/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using the CMD_QUE_XRI64_CX iocb are in the pring->postbufq
 * list. After the HBA DMAs data to these buffers, a CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found then the lpfc_dmabuf object of the
 * buffer is returned to the caller, else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%px x%px x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}
/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of an unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found, else it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%px x%px x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}
11098 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
11099 * @phba: Pointer to HBA context object.
11100 * @cmdiocb: Pointer to driver command iocb object.
11101 * @rspiocb: Pointer to driver response iocb object.
11103 * This function is the completion handler for the abort iocbs for
11104 * ELS commands. This function is called from the ELS ring event
11105 * handler with no lock held. This function frees memory resources
11106 * associated with the abort iocb.
11109 lpfc_sli_abort_els_cmpl(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
11110 struct lpfc_iocbq
*rspiocb
)
11112 IOCB_t
*irsp
= &rspiocb
->iocb
;
11113 uint16_t abort_iotag
, abort_context
;
11114 struct lpfc_iocbq
*abort_iocb
= NULL
;
11116 if (irsp
->ulpStatus
) {
11119 * Assume that the port already completed and returned, or
11120 * will return the iocb. Just Log the message.
11122 abort_context
= cmdiocb
->iocb
.un
.acxri
.abortContextTag
;
11123 abort_iotag
= cmdiocb
->iocb
.un
.acxri
.abortIoTag
;
11125 spin_lock_irq(&phba
->hbalock
);
11126 if (phba
->sli_rev
< LPFC_SLI_REV4
) {
11127 if (irsp
->ulpCommand
== CMD_ABORT_XRI_CX
&&
11128 irsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
&&
11129 irsp
->un
.ulpWord
[4] == IOERR_ABORT_REQUESTED
) {
11130 spin_unlock_irq(&phba
->hbalock
);
11133 if (abort_iotag
!= 0 &&
11134 abort_iotag
<= phba
->sli
.last_iotag
)
11136 phba
->sli
.iocbq_lookup
[abort_iotag
];
11138 /* For sli4 the abort_tag is the XRI,
11139 * so the abort routine puts the iotag of the iocb
11140 * being aborted in the context field of the abort
11143 abort_iocb
= phba
->sli
.iocbq_lookup
[abort_context
];
11145 lpfc_printf_log(phba
, KERN_WARNING
, LOG_ELS
| LOG_SLI
,
11146 "0327 Cannot abort els iocb x%px "
11147 "with tag %x context %x, abort status %x, "
11149 abort_iocb
, abort_iotag
, abort_context
,
11150 irsp
->ulpStatus
, irsp
->un
.ulpWord
[4]);
11152 spin_unlock_irq(&phba
->hbalock
);
11155 lpfc_sli_release_iocbq(phba
, cmdiocb
);
/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0139 Ignoring ELS cmd tag x%x completion Data: "
			"x%x x%x x%x\n",
			irsp->ulpIoTag, irsp->ulpStatus,
			irsp->un.ulpWord[4], irsp->ulpTimeout);
	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
		lpfc_ct_free_iocb(phba, cmdiocb);
	else
		lpfc_els_free_iocb(phba, cmdiocb);
}
11190 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
11191 * @phba: Pointer to HBA context object.
11192 * @pring: Pointer to driver SLI ring object.
11193 * @cmdiocb: Pointer to driver command iocb object.
11195 * This function issues an abort iocb for the provided command iocb down to
11196 * the port. Other than the case the outstanding command iocb is an abort
11197 * request, this function issues abort out unconditionally. This function is
11198 * called with hbalock held. The function returns 0 when it fails due to
11199 * memory allocation failure or when the command iocb is an abort request.
11202 lpfc_sli_abort_iotag_issue(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
11203 struct lpfc_iocbq
*cmdiocb
)
11205 struct lpfc_vport
*vport
= cmdiocb
->vport
;
11206 struct lpfc_iocbq
*abtsiocbp
;
11207 IOCB_t
*icmd
= NULL
;
11208 IOCB_t
*iabt
= NULL
;
11210 unsigned long iflags
;
11211 struct lpfc_nodelist
*ndlp
;
11213 lockdep_assert_held(&phba
->hbalock
);
11216 * There are certain command types we don't want to abort. And we
11217 * don't want to abort commands that are already in the process of
11220 icmd
= &cmdiocb
->iocb
;
11221 if (icmd
->ulpCommand
== CMD_ABORT_XRI_CN
||
11222 icmd
->ulpCommand
== CMD_CLOSE_XRI_CN
||
11223 (cmdiocb
->iocb_flag
& LPFC_DRIVER_ABORTED
) != 0)
11226 /* issue ABTS for this IOCB based on iotag */
11227 abtsiocbp
= __lpfc_sli_get_iocbq(phba
);
11228 if (abtsiocbp
== NULL
)
11231 /* This signals the response to set the correct status
11232 * before calling the completion handler
11234 cmdiocb
->iocb_flag
|= LPFC_DRIVER_ABORTED
;
11236 iabt
= &abtsiocbp
->iocb
;
11237 iabt
->un
.acxri
.abortType
= ABORT_TYPE_ABTS
;
11238 iabt
->un
.acxri
.abortContextTag
= icmd
->ulpContext
;
11239 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
11240 iabt
->un
.acxri
.abortIoTag
= cmdiocb
->sli4_xritag
;
11241 iabt
->un
.acxri
.abortContextTag
= cmdiocb
->iotag
;
11243 iabt
->un
.acxri
.abortIoTag
= icmd
->ulpIoTag
;
11244 if (pring
->ringno
== LPFC_ELS_RING
) {
11245 ndlp
= (struct lpfc_nodelist
*)(cmdiocb
->context1
);
11246 iabt
->un
.acxri
.abortContextTag
= ndlp
->nlp_rpi
;
11250 iabt
->ulpClass
= icmd
->ulpClass
;
11252 /* ABTS WQE must go to the same WQ as the WQE to be aborted */
11253 abtsiocbp
->hba_wqidx
= cmdiocb
->hba_wqidx
;
11254 if (cmdiocb
->iocb_flag
& LPFC_IO_FCP
)
11255 abtsiocbp
->iocb_flag
|= LPFC_USE_FCPWQIDX
;
11256 if (cmdiocb
->iocb_flag
& LPFC_IO_FOF
)
11257 abtsiocbp
->iocb_flag
|= LPFC_IO_FOF
;
11259 if (phba
->link_state
>= LPFC_LINK_UP
)
11260 iabt
->ulpCommand
= CMD_ABORT_XRI_CN
;
11262 iabt
->ulpCommand
= CMD_CLOSE_XRI_CN
;
11264 abtsiocbp
->iocb_cmpl
= lpfc_sli_abort_els_cmpl
;
11265 abtsiocbp
->vport
= vport
;
11267 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_SLI
,
11268 "0339 Abort xri x%x, original iotag x%x, "
11269 "abort cmd iotag x%x\n",
11270 iabt
->un
.acxri
.abortIoTag
,
11271 iabt
->un
.acxri
.abortContextTag
,
11274 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
11275 pring
= lpfc_sli4_calc_ring(phba
, abtsiocbp
);
11276 if (unlikely(pring
== NULL
))
11278 /* Note: both hbalock and ring_lock need to be set here */
11279 spin_lock_irqsave(&pring
->ring_lock
, iflags
);
11280 retval
= __lpfc_sli_issue_iocb(phba
, pring
->ringno
,
11282 spin_unlock_irqrestore(&pring
->ring_lock
, iflags
);
11284 retval
= __lpfc_sli_issue_iocb(phba
, pring
->ringno
,
11289 __lpfc_sli_release_iocbq(phba
, abtsiocbp
);
11292 * Caller to this routine should check for IOCB_ERROR
11293 * and handle it properly. This routine no longer removes
11294 * iocb off txcmplq and call compl in case of IOCB_ERROR.
11300 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
11301 * @phba: Pointer to HBA context object.
11302 * @pring: Pointer to driver SLI ring object.
11303 * @cmdiocb: Pointer to driver command iocb object.
11305 * This function issues an abort iocb for the provided command iocb. In case
11306 * of unloading, the abort iocb will not be issued to commands on the ELS
11307 * ring. Instead, the callback function shall be changed to those commands
11308 * so that nothing happens when them finishes. This function is called with
11309 * hbalock held. The function returns 0 when the command iocb is an abort
11313 lpfc_sli_issue_abort_iotag(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
11314 struct lpfc_iocbq
*cmdiocb
)
11316 struct lpfc_vport
*vport
= cmdiocb
->vport
;
11317 int retval
= IOCB_ERROR
;
11318 IOCB_t
*icmd
= NULL
;
11320 lockdep_assert_held(&phba
->hbalock
);
11323 * There are certain command types we don't want to abort. And we
11324 * don't want to abort commands that are already in the process of
11327 icmd
= &cmdiocb
->iocb
;
11328 if (icmd
->ulpCommand
== CMD_ABORT_XRI_CN
||
11329 icmd
->ulpCommand
== CMD_CLOSE_XRI_CN
||
11330 (cmdiocb
->iocb_flag
& LPFC_DRIVER_ABORTED
) != 0)
11334 if (cmdiocb
->iocb_flag
& LPFC_IO_FABRIC
)
11335 cmdiocb
->fabric_iocb_cmpl
= lpfc_ignore_els_cmpl
;
11337 cmdiocb
->iocb_cmpl
= lpfc_ignore_els_cmpl
;
11338 goto abort_iotag_exit
;
11342 * If we're unloading, don't abort iocb on the ELS ring, but change
11343 * the callback so that nothing happens when it finishes.
11345 if ((vport
->load_flag
& FC_UNLOADING
) &&
11346 (pring
->ringno
== LPFC_ELS_RING
)) {
11347 if (cmdiocb
->iocb_flag
& LPFC_IO_FABRIC
)
11348 cmdiocb
->fabric_iocb_cmpl
= lpfc_ignore_els_cmpl
;
11350 cmdiocb
->iocb_cmpl
= lpfc_ignore_els_cmpl
;
11351 goto abort_iotag_exit
;
11354 /* Now, we try to issue the abort to the cmdiocb out */
11355 retval
= lpfc_sli_abort_iotag_issue(phba
, pring
, cmdiocb
);
11359 * Caller to this routine should check for IOCB_ERROR
11360 * and handle it properly. This routine no longer removes
11361 * iocb off txcmplq and call compl in case of IOCB_ERROR.
/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *qp = NULL;
	int i;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}
/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria are met for the given iocb and will return
 * 1 if the filtering criteria are not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by vport, tgt_id and
 * lun_id parameters.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_io_buf *lpfc_cmd;
	int rc = 1;

	if (iocbq->vport != vport)
		return rc;

	if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
	    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __func__, ctx_cmd);
		break;
	}

	return rc;
}
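/*
 * Illustrative sketch (not part of the driver): the abort and count
 * routines below iterate the iotag lookup table and use
 * lpfc_sli_validate_fcp_iocb() purely as a filter, a return of 0 meaning
 * "this iocb matches the vport/target/LUN criteria":
 *
 *	for (i = 1; i <= phba->sli.last_iotag; i++) {
 *		iocbq = phba->sli.iocbq_lookup[i];
 *		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
 *					       LPFC_CTX_TGT) == 0)
 *			act_on(iocbq);	(act_on is a hypothetical helper)
 *	}
 */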
/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns the number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI device specified
 * by the tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
 * commands pending on the vport associated with the SCSI target specified
 * by the tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
		  lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int sum, i;

	spin_lock_irq(&phba->hbalock);
	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
						ctx_cmd) == 0)
			sum++;
	}
	spin_unlock_irq(&phba->hbalock);

	return sum;
}
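/*
 * Illustrative sketch (not part of the driver): an error handler might use
 * lpfc_sli_sum_iocb() to poll for outstanding I/O after a task management
 * command, e.g. waiting for a LUN to drain before declaring success:
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *	       time_before(jiffies, deadline))	 (deadline: caller supplied)
 *		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
 */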
/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3096 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"status 0x%x, reason 0x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
			rspiocb->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by the lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_sli_ring *pring_s4;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH)
		return errcnt;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		/* indicate the IO is being aborted by the driver. */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
		else
			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocb->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocb->iocb_flag |= LPFC_IO_FOF;

		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
			if (!pring_s4)
				continue;
			ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
						      abtsiocb, 0);
		} else
			ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
						      abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
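/*
 * Illustrative sketch (not part of the driver): after a target reset a
 * caller can request aborts for everything still pending to that target;
 * the non-zero return value is the number of aborts that could not be
 * issued:
 *
 *	failed = lpfc_sli_abort_iocb(vport,
 *				     &phba->sli.sli3_ring[LPFC_FCP_RING],
 *				     tgt_id, 0, LPFC_CTX_TGT);
 *	if (failed)
 *		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 *				 "....  %d abort requests not issued\n",
 *				 failed);	(message id intentionally
 *						 left unspecified)
 */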
/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by the lpfc_sli_validate_fcp_iocb function.
 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with the lun specified by the tgt_id and lun_id
 * parameters.
 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with the SCSI target specified by the tgt_id
 * parameter.
 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with the virtual port.
 * This function returns the number of iocbs it aborted.
 * This function is called with no locks held, right after a task management
 * command is sent.
 **/
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4 = NULL;

	spin_lock_irqsave(&phba->hbalock, iflags);

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/* Guard against IO completion being called at same time */
		lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		spin_lock(&lpfc_cmd->buf_lock);

		if (!lpfc_cmd->pCmd) {
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 =
			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
			if (!pring_s4) {
				spin_unlock(&lpfc_cmd->buf_lock);
				continue;
			}
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock(&pring_s4->ring_lock);
		}

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
		    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (!abtsiocbq) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		icmd = &iocbq->iocb;
		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocbq->iocb.un.acxri.abortIoTag =
							 iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
		abtsiocbq->iocb.ulpLe = 1;
		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
		abtsiocbq->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocbq->iocb_flag |= LPFC_IO_FOF;

		ndlp = lpfc_cmd->rdata->pnode;

		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock(&pring_s4->ring_lock);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}

		spin_unlock(&lpfc_cmd->buf_lock);

		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return sum;
}
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other threads which
 * clean up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_io_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * A time out has occurred for the iocb.  If a time out
		 * completion handler has been supplied, call it.  Otherwise,
		 * just free the iocbq.
		 */

		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
					cur_iocbq);
		if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
			lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
		else
			lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}
/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
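/*
 * lpfc_chk_iocb_flg() exists so the synchronous issue path can test
 * LPFC_IO_WAKE under hbalock from a wait_event_timeout() condition; testing
 * the flag with the lock held closes the race with the completion handler
 * that sets the flag and wakes the queue. Sketch of the expected use:
 *
 *	timeleft = wait_event_timeout(done_q,
 *				      lpfc_chk_iocb_flg(phba, piocb,
 *							LPFC_IO_WAKE),
 *				      timeout_req);
 */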
/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to sli ring.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure.  If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up.  If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or it's an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out.  Inform the wake iocb wait
			 * completion function and set local status
			 */

			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a success
			 * status or not - that's for the caller to check.
			 * IOCB_SUCCESS means just that the command was sent and
			 * completed. Not that it completed successfully.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
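/*
 * Illustrative sketch (not part of the driver): a typical synchronous user
 * allocates a response iocbq, issues with a timeout in seconds, and must
 * not free the command iocb when IOCB_TIMEDOUT is returned because the
 * timeout completion handler owns that cleanup:
 *
 *	rspiocbq = lpfc_sli_get_iocbq(phba);
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, timeout_secs);
 *	if (rc == IOCB_SUCCESS)
 *		parse_response(rspiocbq);	(parse_response is hypothetical)
 *	if (rc != IOCB_TIMEDOUT)
 *		lpfc_sli_release_iocbq(phba, cmdiocbq);
 *	lpfc_sli_release_iocbq(phba, rspiocbq);
 */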
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. The caller
 * should not free the mailbox resources if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	struct completion mbox_done;
	int retval;
	unsigned long flag;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	/* setup context3 field to pass wait_queue pointer to wake function */
	init_completion(&mbox_done);
	pmboxq->context3 = &mbox_done;
	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_for_completion_timeout(&mbox_done,
					    msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context3 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}
	return retval;
}
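/*
 * Illustrative sketch (not part of the driver): synchronous mailbox users
 * allocate from the mailbox mem_pool, build the command, and must not free
 * the mailbox when MBX_TIMEOUT is returned because the firmware may still
 * complete it later:
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_rev(phba, pmb);		(any mailbox setup helper)
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */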
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as being in a blocked state to
 * prevent asynchronous mailbox commands from being issued off the pending
 * mailbox command queue. If the mailbox command sub-system shutdown is due
 * to HBA error conditions such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to a normal condition (such
 * as with offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down the mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command
				 */
				break;
		}
	} else {
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();
	}

	lpfc_sli_mbox_sys_flush(phba);
}
/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}
/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}
/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}
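/*
 * Sketch of the expected caller (not part of this function): the
 * error-attention poll timer calls lpfc_sli_check_eratt() and, when it
 * returns 1, defers the actual handling to the worker thread instead of
 * processing the error in timer context:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */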
/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state that
 * interrupt should be handled, otherwise -EIO.
 **/
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}
/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				(HA_RXMASK  << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring:   pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if there is a deferred error condition
			 * is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->ctx_buf)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->ctx_buf,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->ctx_buf);
						ndlp = (struct lpfc_nodelist *)
							pmb->ctx_ndlp;

						/* Reg_LOGIN of dflt RPI was
						 * successful. Now let's get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->ctx_buf = mp;
						pmb->ctx_ndlp = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have "
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
					LOG_SLI, "0349 rc should be "
					"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
}  /* lpfc_sli_fp_intr_handler */
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
}  /* lpfc_sli_intr_handler */
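/*
 * Registration sketch (assumption; the actual hookup lives in the driver's
 * init code, not in this file): for SLI-3 in MSI or INTx mode the
 * device-level handler above is the one handed to the kernel, roughly:
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *
 * while in MSI-X mode lpfc_sli_sp_intr_handler() and
 * lpfc_sli_fp_intr_handler() are registered on separate vectors.
 */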
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
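/*
 * Sketch of the expected caller (not part of this function): the worker
 * thread invokes this routine after the CQ handler has queued the aborted
 * XRI event and set the flag, e.g.:
 *
 *	if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 *		lpfc_sli4_els_xri_abort_event_proc(phba);
 */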
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl  = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return NULL;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	spin_lock_irqsave(&pring->ring_lock, iflags);
	/* Put the iocb back on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}
inline struct lpfc_cq_event *
lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
{
	struct lpfc_cq_event *cq_event;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to alloc CQ_EVENT entry\n");
		return NULL;
	}

	/* Move the CQE into the event */
	memcpy(&cq_event->cqe, entry, size);
	return cq_event;
}
/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
	if (!cq_event)
		return false;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry that carries a
 * mailbox completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
			/* Reg_LOGIN of dflt RPI was successful. Now let's get
			 * rid of the RPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->ctx_buf = mp;
			pmb->ctx_ndlp = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
	return workposted;

out_no_mqe_complete:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return workposted;
}

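/*
 * Note: in lpfc_sli4_sp_handle_mbox_event() above, when the MCQE reports a
 * failure while the MQE status word still reads MBX_SUCCESS, the MCQE status
 * is folded into the MQE status (OR'ed with LPFC_MBX_ERROR_RANGE) so that
 * mailbox completion callbacks cannot miss an error the firmware reported
 * only in the MCQE.
 */
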
/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	cq->CQ_mbox++;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0357 ELS CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"els_txcmplq_cnt=%d\n",
			txq_cnt, phba->iocb_cnt,
			txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}

/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking the
 * proper WQ release routine for the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* sanity check on queue memory */
	if (unlikely(!phba->sli4_hba.els_wq))
		return;
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}

/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	switch (cq->subtype) {
	case LPFC_IO:
		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Notify aborted XRI for NVME work queue */
			if (phba->nvmet_support)
				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		}
		workposted = false;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(
			phba, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
		if (!cq_event)
			break;
		cq_event->hdwq = cq->hdwq;
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid CQ subtype %d: "
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		workposted = false;
		break;
	}
	return workposted;
}

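/*
 * Note: in the XRI abort handler above, I/O (FCP/NVME) aborts are completed
 * inline via lpfc_sli4_io_xri_aborted(), while ELS and NVME-LS aborts are
 * wrapped in a cq_event, queued on sp_els_xri_aborted_work_queue, and
 * flagged with ELS_XRI_ABORT_EVENT for the worker thread to finish.
 */
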
#define FC_RCTL_MDS_DIAGS	0xF4

/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			/* Handle MDS Loopback frames */
			lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf);
			break;
		}

		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}

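/*
 * Note: MDS diagnostic loopback frames (R_CTL FC_RCTL_MDS_DIAGS or
 * unsolicited data) are handed to lpfc_sli4_handle_mds_loopback() directly
 * from the RCQE handler above; all other received frames are queued on
 * sp_queue_event and left for the worker thread (HBA_SP_QUEUE_EVT).
 */
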
/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive-queue completion
 * queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event queue.
 * It will check the MajorCode and MinorCode to determine whether this is for
 * a completion event on a completion queue; if not, an error is logged and
 * the routine just returns. Otherwise, it gets to the corresponding completion
 * queue, processes all the entries on that completion queue, rearms the
 * completion queue, and then returns.
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	uint16_t cqid;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Save EQ associated with this CQ */
	cq->assoc_qp = speq;

	if (!queue_work_on(cq->chann, phba->wq, &cq->spwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0390 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}

/**
 * __lpfc_sli4_process_cq - Process elements of a CQ
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ to be processed
 * @handler: Routine to process each cqe
 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
 *
 * This routine processes completion queue entries in a CQ. While a valid
 * queue element is found, the handler is called. During processing, checks
 * are made for periodic doorbell writes to let the hardware know of
 * element consumption.
 *
 * If the max limit on cqes to process is hit, or there are no more valid
 * entries, the loop stops. If we processed a sufficient number of elements,
 * meaning there is sufficient load, rather than rearming and generating
 * another interrupt, a cq rescheduling delay will be set. A delay of 0
 * indicates no rescheduling.
 *
 * Returns True if work scheduled, False otherwise.
 **/
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
	bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
			struct lpfc_cqe *), unsigned long *delay)
{
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int count = 0, consumed = 0;
	bool arm = true;

	/* default - no reschedule */
	*delay = 0;

	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	/* Process all the entries to the CQ */
	cq->q_flag = 0;
	cqe = lpfc_sli4_cq_get(cq);
	while (cqe) {
		workposted |= handler(phba, cq, cqe);
		__lpfc_sli4_consume_cqe(phba, cq, cqe);

		consumed++;
		if (!(++count % cq->max_proc_limit))
			break;

		if (!(count % cq->notify_interval)) {
			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
			cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
		}

		if (count == LPFC_NVMET_CQ_NOTIFY)
			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;

		cqe = lpfc_sli4_cq_get(cq);
	}
	if (count >= phba->cfg_cq_poll_threshold) {
		*delay = 1;
		arm = false;
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (count > cq->CQ_max_cqe)
		cq->CQ_max_cqe = count;

	cq->assoc_qp->EQ_cqe_cnt += count;

	/* Catch the no cq entry condition */
	if (unlikely(count == 0))
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0369 No entry from completion queue "
				"qid=%d\n", cq->queue_id);

	cq->queue_claimed = 0;

rearm_and_exit:
	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
			arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);

	return workposted;
}

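/*
 * Note: __lpfc_sli4_process_cq() serializes interrupt work and pollers with
 * the cmpxchg() on cq->queue_claimed; the loser of that race simply rearms
 * the CQ and exits. Consumed entries are acknowledged to the hardware in
 * batches of cq->notify_interval to limit doorbell writes.
 */
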
/**
 * __lpfc_sli4_sp_process_cq - Process a slow-path completion queue
 * @cq: pointer to CQ to process
 *
 * This routine calls the cq processing routine with a handler specific
 * to the type of queue bound to it.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wake up the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and generating yet another interrupt, the CQ handler should be
 * queued so that it is processed in a subsequent polling action. The value
 * of the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* Process and rearm the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_mcqe,
						&delay);
		break;
	case LPFC_WCQ:
		if (cq->subtype == LPFC_IO)
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_fp_handle_cqe,
						&delay);
		else
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_cqe,
						&delay);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_spwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
 * workqueue
 * @work: pointer to work element
 *
 * translates from the work handler and calls the slow-path handler.
 **/
static void
lpfc_sli4_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);

	__lpfc_sli4_sp_process_cq(cq);
}

/**
 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
 * @work: pointer to work element
 *
 * translates from the work handler and calls the slow-path handler.
 **/
static void
lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_spwork);

	__lpfc_sli4_sp_process_cq(cq);
}

/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0373 FCP CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (cmdiocbq->iocb_cmpl == NULL) {
		if (cmdiocbq->wqe_cmpl) {
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
			}

			/* Pass the cmd_iocb and the wcqe to the upper layer */
			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
			return;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}

/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine for the fast-path WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t hba_wqid;

	/* Check for fast-path FCP work queue release */
	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == hba_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			if (childwq->q_flag & HBA_NVMET_WQFULL)
				lpfc_nvmet_wqfull_process(phba, childwq);
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
}

/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6126 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf, cq->isr_timestamp,
				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
			return false;
		}
drop:
		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_NVME,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		/* fall through */
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}

/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
	case CQE_CODE_NVME_ERSP:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	case CQE_CODE_RECEIVE_V1:
	case CQE_CODE_RECEIVE:
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_NVMET) {
			workposted = lpfc_sli4_nvmet_handle_rcqe(
				phba, cq, (struct lpfc_rcqe *)&wcqe);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid CQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the event queue.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the fast-path event queue.
 * It will check the MajorCode and MinorCode to determine whether this is for
 * a completion event on a completion queue; if not, an error is logged and
 * the routine just returns. Otherwise, it gets to the corresponding completion
 * queue, processes all the entries on the completion queue, rearms the
 * completion queue, and then returns.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			 struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL;
	uint32_t qidx = eq->hdwq;
	uint16_t cqid, id;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Use the fast lookup method first */
	if (cqid <= phba->sli4_hba.cq_max) {
		cq = phba->sli4_hba.cq_lookup[cqid];
		if (cq)
			goto work_cq;
	}

	/* Next check for NVMET completion */
	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto process_cq;
		}
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
					phba->sli4_hba.hdwq[qidx].hba_eq);
		return;
	}

process_cq:
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

work_cq:
#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
	if (phba->ktime_on)
		cq->isr_timestamp = ktime_get_ns();
	else
		cq->isr_timestamp = 0;
#endif
	if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0363 Cannot schedule soft IRQ "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}

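/*
 * Note: the CQ lookup above is tiered - the cq_lookup[] table is tried
 * first, then the NVMET CQ-set range, then the NVME LS CQ; anything that
 * still does not match is treated as a slow-path event and routed to
 * lpfc_sli4_sp_handle_eqe().
 */
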
/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path completion queue
 * @cq: Pointer to CQ to be processed
 *
 * This routine calls the cq processing routine with the handler for
 * fast path CQEs.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wake up the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and generating yet another interrupt, the CQ handler should be
 * queued so that it is processed in a subsequent polling action. The value
 * of the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;

	/* process and rearm the CQ */
	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
					     &delay);

	if (delay) {
		if (!queue_delayed_work_on(cq->chann, phba->wq,
					   &cq->sched_irqwork, delay))
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0367 Cannot schedule soft IRQ "
				"for cqid=%d on CPU %d\n",
				cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
 * workqueue
 * @work: pointer to work element
 *
 * translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

	__lpfc_sli4_hba_process_cq(cq);
}

/**
 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
 * @work: pointer to work element
 *
 * translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					struct lpfc_queue, sched_irqwork);

	__lpfc_sli4_hba_process_cq(cq);
}

/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when a device with the SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring event is handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQ to FCP CQ mapping is one-to-one, so the FCP EQ index is
 * equal to that of the FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;
	uint32_t icnt;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eqcq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	eqi = phba->sli4_hba.eq_info;
	icnt = this_cpu_inc_return(eqi->icnt);
	fpeq->last_cpu = raw_smp_processor_id();

	if (icnt > LPFC_EQD_ISR_TRIGGER &&
	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_fp_intr_handler */

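/*
 * Note: the per-CPU counter eqi->icnt incremented in the handler above is
 * compared against LPFC_EQD_ISR_TRIGGER; when a CPU takes interrupts that
 * quickly and auto_imax is enabled, the EQ delay is pushed to
 * LPFC_MAX_AUTO_EQ_DELAY to throttle further interrupts from this EQ.
 */
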
/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int qidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */

void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
	struct lpfc_queue *eq;
	int i = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
		i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	rcu_read_unlock();
}

inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
{
	struct lpfc_hba *phba = eq->phba;
	int i = 0;

	/*
	 * Unlocking an irq is one of the entry points to check
	 * for re-schedule, but we are good for the io submission
	 * path as the midlayer does a get_cpu to glue us in. Flush
	 * out the invalidate queue so we can see the updated
	 * value for the mode flag.
	 */
	smp_rmb();

	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
		/* We will not likely get the completion for the caller
		 * during this iteration but i guess that's fine.
		 * Future io's coming on this eq should be able to
		 * pick it up.  As for the case of single io's, they
		 * will be handled through a sched from the polling timer
		 * function which is currently triggered every 1msec.
		 */
		i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);

	return i;
}

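/*
 * Note: when an EQ is in LPFC_EQ_POLL mode, lpfc_sli4_poll_eq() reaps its
 * entries with LPFC_QUEUE_NOARM, i.e. completions are consumed without
 * rearming the EQ, so no further interrupts are generated while polling is
 * active; lpfc_sli4_stop_polling() later rearms the EQ explicitly.
 */
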
static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	if (list_empty(&phba->poll_list)) {
		timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
		/* kickstart slowpath processing for this eq */
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
	}

	list_add_rcu(&eq->_poll_list, &phba->poll_list);
	synchronize_rcu();
}

static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* Disable slowpath processing for this eq.  Kick start the eq
	 * by RE-ARMING the eq's ASAP
	 */
	list_del_rcu(&eq->_poll_list);
	synchronize_rcu();

	if (list_empty(&phba->poll_list))
		del_timer_sync(&phba->cpuhp_poll_timer);
}

void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *next;

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
		list_del(&eq->_poll_list);

	INIT_LIST_HEAD(&phba->poll_list);
	synchronize_rcu();
}

static inline void
__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
{
	if (mode == eq->mode)
		return;
	/*
	 * currently this function is only called during a hotplug
	 * event and the cpu on which this function is executing
	 * is going offline.  By now the hotplug has instructed
	 * the scheduler to remove this cpu from the cpu active mask.
	 * So we don't need to worry about being put aside by the
	 * scheduler for a high priority process.  Yes, the inte-
	 * rrupts could come but they are known to retire ASAP.
	 */

	/* Disable polling in the fastpath */
	WRITE_ONCE(eq->mode, mode);
	/* flush out the store buffer */
	smp_wmb();

	/*
	 * Add this eq to the polling list and start polling. For
	 * a grace period both interrupt handler and poller will
	 * try to process the eq _but_ that's fine.  We have a
	 * synchronization mechanism in place (queue_claimed) to
	 * deal with it.  This is just a draining phase for int-
	 * errupt handler (not eq's) as we have guaranteed through
	 * barrier that all the CPUs have seen the new CQ_POLLED
	 * state, which will effectively disable the REARMING of
	 * the EQ.  The whole idea is eq's die off eventually as
	 * we are not rearming EQ's anymore.
	 */
	mode ? lpfc_sli4_add_to_poll_list(eq) :
	       lpfc_sli4_remove_from_poll_list(eq);
}

void lpfc_sli4_start_polling(struct lpfc_queue *eq)
{
	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
}

void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);

	/* Kick start for the pending io's in h/w.
	 * Once we switch back to interrupt processing on an eq
	 * the io path completion will only arm eq's when it
	 * receives a completion.  But since eq's are in disa-
	 * rmed state it doesn't receive a completion.  This
	 * creates a deadlock scenario.
	 */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
}

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}

	if (!list_empty(&queue->cpu_list))
		list_del(&queue->cpu_list);

	kfree(queue);
	return;
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count, int cpu)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	uint16_t x, pgcnt;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

	/* If needed, Adjust page count to match the max the adapter supports */
	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!queue)
		return NULL;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->_poll_list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now.  If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->page_count = pgcnt;
	queue->q_pgs = (void **)&queue[1];
	queue->entry_cnt_per_pg = hw_page_size / entry_size;
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
				      dev_to_node(&phba->pcidev->dev));
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* use lpfc_sli4_qe to index a particular entry in this page */
		queue->q_pgs[x] = dmabuf->virt;
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}

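/*
 * Note: lpfc_sli4_queue_alloc() sizes one allocation to hold both the
 * lpfc_queue structure and the q_pgs[] page-pointer array that immediately
 * follows it (queue->q_pgs = (void **)&queue[1]), so tearing a queue down is
 * a single kfree() plus freeing the per-page DMA buffers.
 */
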
/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 **/
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}

/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify
 * @numq: The number of EQs (consecutive indexes) to modify
 * @usdelay: amount of delay
 *
 * This function revises the EQ delay on 1 or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by mailbox command. The mailbox command allows several EQs to be
 * updated at once.
 *
 * The @phba struct is used to send a mailbox command to HBA. The @startq
 * is used to get the starting EQ index to change. The @numq value is
 * used to specify how many consecutive EQ indexes, starting at EQ index,
 * are to be changed. This function is asynchronous and will wait for any
 * mailbox commands to finish before returning.
 *
 * On success this function will return a zero. If unable to allocate
 * enough memory this function will return -ENOMEM. If a mailbox command
 * fails this function will return -ENXIO. Note: on ENXIO, some EQs may
 * have had their delay multiplier changed.
 **/
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t usdelay)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt = 0, rc, length;
	uint32_t shdr_status, shdr_add_status;
	uint32_t dmult;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;

	if (startq >= phba->cfg_irq_chann)
		return;

	if (usdelay > 0xFFFF) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
				"6429 usdelay %d too large. Scaled down to "
				"0xFFFF.\n", usdelay);
		usdelay = 0xFFFF;
	}

	/* set values by EQ_DELAY register if supported */
	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			if (!eq)
				continue;

			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

			if (++cnt >= numq)
				break;
		}
		return;
	}

	/* Otherwise, set values by mailbox cmd */

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_FCP | LOG_NVME,
				"6428 Failed allocating mailbox cmd buffer."
				" EQ delay was not set.\n");
		return;
	}
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		eq->q_mode = usdelay;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;

		if (++cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	return;
}

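/*
 * Note: for ports without EQ_DELAY register support, the delay multiplier
 * written by the MODIFY_EQ_DELAY mailbox above is derived from the requested
 * microsecond delay as (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC,
 * capped at LPFC_DMULT_MAX; ports with register support are updated directly
 * through lpfc_sli4_mod_hba_eq_delay().
 */
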
/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. This function is asynchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);

	/* Use version 2 of CREATE_EQ if eqav is set */
	if (phba->sli4_hba.pc_sli4_params.eqav) {
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_2);
		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
		       phba->sli4_hba.pc_sli4_params.eqav);
	}

	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* fall through - otherwise default to smallest count */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba, by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @cq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to.
 * The mailbox is issued in polled mode, so this function waits for the
 * CQ_CREATE command to complete before returning.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
           struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
    struct lpfc_mbx_cq_create *cq_create;
    struct lpfc_dmabuf *dmabuf;
    LPFC_MBOXQ_t *mbox;
    int rc, length, status = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;

    /* sanity check on queue memory */
    if (!cq || !eq)
        return -ENODEV;

    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
    length = (sizeof(struct lpfc_mbx_cq_create) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
             LPFC_MBOX_OPCODE_CQ_CREATE,
             length, LPFC_SLI4_MBX_EMBED);
    cq_create = &mbox->u.mqe.un.cq_create;
    shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
    bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
           cq->page_count);
    bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
    bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
    bf_set(lpfc_mbox_hdr_version, &shdr->request,
           phba->sli4_hba.pc_sli4_params.cqv);
    if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
        bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
               (cq->page_size / SLI4_PAGE_SIZE));
        bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
               eq->queue_id);
        bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
               phba->sli4_hba.pc_sli4_params.cqav);
    } else {
        bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
               eq->queue_id);
    }
    switch (cq->entry_count) {
    case 2048:
    case 4096:
        if (phba->sli4_hba.pc_sli4_params.cqv ==
            LPFC_Q_CREATE_VERSION_2) {
            cq_create->u.request.context.lpfc_cq_context_count =
                cq->entry_count;
            bf_set(lpfc_cq_context_count,
                   &cq_create->u.request.context,
                   LPFC_CQ_CNT_WORD7);
            break;
        }
        /* fall through */
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                "0361 Unsupported CQ count: "
                "entry cnt %d sz %d pg cnt %d\n",
                cq->entry_count, cq->entry_size,
                cq->page_count);
        if (cq->entry_count < 256) {
            status = -EINVAL;
            goto out;
        }
        /* fall through - otherwise default to smallest count */
    case 256:
        bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
               LPFC_CQ_CNT_256);
        break;
    case 512:
        bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
               LPFC_CQ_CNT_512);
        break;
    case 1024:
        bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
               LPFC_CQ_CNT_1024);
        break;
    }
    list_for_each_entry(dmabuf, &cq->page_list, list) {
        memset(dmabuf->virt, 0, cq->page_size);
        cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
                putPaddrLow(dmabuf->phys);
        cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
                putPaddrHigh(dmabuf->phys);
    }
    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2501 CQ_CREATE mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
        goto out;
    }
    cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
    if (cq->queue_id == 0xFFFF) {
        status = -ENXIO;
        goto out;
    }
    /* link the cq onto the parent eq child list */
    list_add_tail(&cq->list, &eq->child_list);
    /* Set up completion queue's type and subtype */
    cq->type = type;
    cq->subtype = subtype;
    cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
    cq->assoc_qid = eq->queue_id;
    cq->host_index = 0;
    cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
    cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);

    if (cq->queue_id > phba->sli4_hba.cq_max)
        phba->sli4_hba.cq_max = cq->queue_id;
out:
    mempool_free(mbox, phba->mbox_mem_pool);
    return status;
}
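/*
 * Note on the completion pattern used throughout these create/destroy
 * routines: the request is built with lpfc_sli4_config(), issued with
 * MBX_POLL, and then both the mailbox return code and the IOCTL status
 * embedded in the config subheader are checked. A minimal sketch of that
 * shape, using the same names as above:
 *
 *    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *    if (shdr_status || shdr_add_status || rc)
 *        status = -ENXIO;
 */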
/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQs to bind the completion queues to.
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a CREATE_CQ_SET
 * mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. Each @cqp
 * entry is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for that queue. The
 * @hdwq array indicates which event queue to bind each completion queue to.
 * The mailbox is issued in polled mode, so this function waits for the
 * CREATE_CQ_SET command to complete before returning.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
           struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
           uint32_t subtype)
{
    struct lpfc_queue *cq;
    struct lpfc_queue *eq;
    struct lpfc_mbx_cq_create_set *cq_set;
    struct lpfc_dmabuf *dmabuf;
    LPFC_MBOXQ_t *mbox;
    int rc, length, alloclen, status = 0;
    int cnt, idx, numcq, page_idx = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;
    uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

    /* sanity check on queue memory */
    numcq = phba->cfg_nvmet_mrq;
    if (!cqp || !hdwq || !numcq)
        return -ENODEV;

    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;

    length = sizeof(struct lpfc_mbx_cq_create_set);
    length += ((numcq * cqp[0]->page_count) *
           sizeof(struct dma_address));
    alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
            LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
            LPFC_SLI4_MBX_NEMBED);
    if (alloclen < length) {
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                "3098 Allocated DMA memory size (%d) is "
                "less than the requested DMA memory size "
                "(%d)\n", alloclen, length);
        status = -ENOMEM;
        goto out;
    }
    cq_set = mbox->sge_array->addr[0];
    shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
    bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);

    for (idx = 0; idx < numcq; idx++) {
        cq = cqp[idx];
        eq = hdwq[idx].hba_eq;
        if (!cq || !eq) {
            status = -ENOMEM;
            goto out;
        }
        if (!phba->sli4_hba.pc_sli4_params.supported)
            hw_page_size = cq->page_size;

        switch (idx) {
        case 0:
            bf_set(lpfc_mbx_cq_create_set_page_size,
                   &cq_set->u.request,
                   (hw_page_size / SLI4_PAGE_SIZE));
            bf_set(lpfc_mbx_cq_create_set_num_pages,
                   &cq_set->u.request, cq->page_count);
            bf_set(lpfc_mbx_cq_create_set_evt,
                   &cq_set->u.request, 1);
            bf_set(lpfc_mbx_cq_create_set_valid,
                   &cq_set->u.request, 1);
            bf_set(lpfc_mbx_cq_create_set_cqe_size,
                   &cq_set->u.request, 0);
            bf_set(lpfc_mbx_cq_create_set_num_cq,
                   &cq_set->u.request, numcq);
            bf_set(lpfc_mbx_cq_create_set_autovalid,
                   &cq_set->u.request,
                   phba->sli4_hba.pc_sli4_params.cqav);
            switch (cq->entry_count) {
            case 2048:
            case 4096:
                if (phba->sli4_hba.pc_sli4_params.cqv ==
                    LPFC_Q_CREATE_VERSION_2) {
                    bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
                           &cq_set->u.request,
                           cq->entry_count);
                    bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
                           &cq_set->u.request,
                           LPFC_CQ_CNT_WORD7);
                    break;
                }
                /* fall through */
            default:
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                        "3118 Bad CQ count. (%d)\n",
                        cq->entry_count);
                if (cq->entry_count < 256) {
                    status = -EINVAL;
                    goto out;
                }
                /* fall through - otherwise default to smallest */
            case 256:
                bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
                       &cq_set->u.request, LPFC_CQ_CNT_256);
                break;
            case 512:
                bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
                       &cq_set->u.request, LPFC_CQ_CNT_512);
                break;
            case 1024:
                bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
                       &cq_set->u.request, LPFC_CQ_CNT_1024);
                break;
            }
            bf_set(lpfc_mbx_cq_create_set_eq_id0,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 1:
            bf_set(lpfc_mbx_cq_create_set_eq_id1,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 2:
            bf_set(lpfc_mbx_cq_create_set_eq_id2,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 3:
            bf_set(lpfc_mbx_cq_create_set_eq_id3,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 4:
            bf_set(lpfc_mbx_cq_create_set_eq_id4,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 5:
            bf_set(lpfc_mbx_cq_create_set_eq_id5,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 6:
            bf_set(lpfc_mbx_cq_create_set_eq_id6,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 7:
            bf_set(lpfc_mbx_cq_create_set_eq_id7,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 8:
            bf_set(lpfc_mbx_cq_create_set_eq_id8,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 9:
            bf_set(lpfc_mbx_cq_create_set_eq_id9,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 10:
            bf_set(lpfc_mbx_cq_create_set_eq_id10,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 11:
            bf_set(lpfc_mbx_cq_create_set_eq_id11,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 12:
            bf_set(lpfc_mbx_cq_create_set_eq_id12,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 13:
            bf_set(lpfc_mbx_cq_create_set_eq_id13,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 14:
            bf_set(lpfc_mbx_cq_create_set_eq_id14,
                   &cq_set->u.request, eq->queue_id);
            break;
        case 15:
            bf_set(lpfc_mbx_cq_create_set_eq_id15,
                   &cq_set->u.request, eq->queue_id);
            break;
        }

        /* link the cq onto the parent eq child list */
        list_add_tail(&cq->list, &eq->child_list);
        /* Set up completion queue's type and subtype */
        cq->type = type;
        cq->subtype = subtype;
        cq->assoc_qid = eq->queue_id;
        cq->host_index = 0;
        cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
        cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
                     cq->entry_count);

        rc = 0;
        list_for_each_entry(dmabuf, &cq->page_list, list) {
            memset(dmabuf->virt, 0, hw_page_size);
            cnt = page_idx + dmabuf->buffer_tag;
            cq_set->u.request.page[cnt].addr_lo =
                    putPaddrLow(dmabuf->phys);
            cq_set->u.request.page[cnt].addr_hi =
                    putPaddrHigh(dmabuf->phys);
            rc++;
        }
        page_idx += rc;
    }

    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "3119 CQ_CREATE_SET mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
        goto out;
    }
    rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
    if (rc == 0xFFFF) {
        status = -ENXIO;
        goto out;
    }

    for (idx = 0; idx < numcq; idx++) {
        cq = cqp[idx];
        cq->queue_id = rc + idx;
        if (cq->queue_id > phba->sli4_hba.cq_max)
            phba->sli4_hba.cq_max = cq->queue_id;
    }

out:
    lpfc_sli4_mbox_cmd_free(phba, mbox);
    return status;
}
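/*
 * CREATE_CQ_SET returns a single base queue id; the CQs in the set are then
 * numbered consecutively from it (cq->queue_id = base + idx above). For
 * example, with cfg_nvmet_mrq = 4 and a base id of 40, the set uses CQ ids
 * 40, 41, 42 and 43.
 */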
/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides fallback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is identical
 * to mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
               LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
    struct lpfc_mbx_mq_create *mq_create;
    struct lpfc_dmabuf *dmabuf;
    int length;

    length = (sizeof(struct lpfc_mbx_mq_create) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
             LPFC_MBOX_OPCODE_MQ_CREATE,
             length, LPFC_SLI4_MBX_EMBED);
    mq_create = &mbox->u.mqe.un.mq_create;
    bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
           mq->page_count);
    bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
           cq->queue_id);
    bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
    switch (mq->entry_count) {
    case 16:
        bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
               LPFC_MQ_RING_SIZE_16);
        break;
    case 32:
        bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
               LPFC_MQ_RING_SIZE_32);
        break;
    case 64:
        bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
               LPFC_MQ_RING_SIZE_64);
        break;
    case 128:
        bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
               LPFC_MQ_RING_SIZE_128);
        break;
    }
    list_for_each_entry(dmabuf, &mq->page_list, list) {
        mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
            putPaddrLow(dmabuf->phys);
        mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
            putPaddrHigh(dmabuf->phys);
    }
}
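/*
 * The MQ ring size is encoded rather than passed as a raw count: entry
 * counts of 16, 32, 64 and 128 map to LPFC_MQ_RING_SIZE_16/32/64/128 in the
 * queue context, as in the switch above. lpfc_mq_create() below performs
 * the same mapping when it builds the extended MQ_CREATE_EXT request.
 */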
/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba, by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @mq
 * struct is used to get the entry count and entry size that are necessary
 * to determine the number of pages to allocate and use for this queue. The
 * mailbox is issued in polled mode, so this function waits for the MQ_CREATE
 * command to complete before returning.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
           struct lpfc_queue *cq, uint32_t subtype)
{
    struct lpfc_mbx_mq_create *mq_create;
    struct lpfc_mbx_mq_create_ext *mq_create_ext;
    struct lpfc_dmabuf *dmabuf;
    LPFC_MBOXQ_t *mbox;
    int rc, length, status = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;
    uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

    /* sanity check on queue memory */
    if (!mq || !cq)
        return -ENODEV;
    if (!phba->sli4_hba.pc_sli4_params.supported)
        hw_page_size = SLI4_PAGE_SIZE;

    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
    length = (sizeof(struct lpfc_mbx_mq_create_ext) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
             LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
             length, LPFC_SLI4_MBX_EMBED);

    mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
    shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
    bf_set(lpfc_mbx_mq_create_ext_num_pages,
           &mq_create_ext->u.request, mq->page_count);
    bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
           &mq_create_ext->u.request, 1);
    bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
           &mq_create_ext->u.request, 1);
    bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
           &mq_create_ext->u.request, 1);
    bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
           &mq_create_ext->u.request, 1);
    bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
           &mq_create_ext->u.request, 1);
    bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
    bf_set(lpfc_mbox_hdr_version, &shdr->request,
           phba->sli4_hba.pc_sli4_params.mqv);
    if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
        bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
               cq->queue_id);
    else
        bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
               cq->queue_id);
    switch (mq->entry_count) {
    default:
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                "0362 Unsupported MQ count. (%d)\n",
                mq->entry_count);
        if (mq->entry_count < 16) {
            status = -EINVAL;
            goto out;
        }
        /* fall through - otherwise default to smallest count */
    case 16:
        bf_set(lpfc_mq_context_ring_size,
               &mq_create_ext->u.request.context,
               LPFC_MQ_RING_SIZE_16);
        break;
    case 32:
        bf_set(lpfc_mq_context_ring_size,
               &mq_create_ext->u.request.context,
               LPFC_MQ_RING_SIZE_32);
        break;
    case 64:
        bf_set(lpfc_mq_context_ring_size,
               &mq_create_ext->u.request.context,
               LPFC_MQ_RING_SIZE_64);
        break;
    case 128:
        bf_set(lpfc_mq_context_ring_size,
               &mq_create_ext->u.request.context,
               LPFC_MQ_RING_SIZE_128);
        break;
    }
    list_for_each_entry(dmabuf, &mq->page_list, list) {
        memset(dmabuf->virt, 0, hw_page_size);
        mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
                putPaddrLow(dmabuf->phys);
        mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
                putPaddrHigh(dmabuf->phys);
    }
    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
    mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
                  &mq_create_ext->u.response);
    if (rc != MBX_SUCCESS) {
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                "2795 MQ_CREATE_EXT failed with "
                "status x%x. Failback to MQ_CREATE.\n",
                rc);
        lpfc_mq_create_fb_init(phba, mq, mbox, cq);
        mq_create = &mbox->u.mqe.un.mq_create;
        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
        shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
        mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
                      &mq_create->u.response);
    }

    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2502 MQ_CREATE mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
        goto out;
    }
    if (mq->queue_id == 0xFFFF) {
        status = -ENXIO;
        goto out;
    }
    mq->type = LPFC_MQ;
    mq->assoc_qid = cq->queue_id;
    mq->subtype = subtype;
    mq->host_index = 0;

    /* link the mq onto the parent cq child list */
    list_add_tail(&mq->list, &cq->child_list);
out:
    mempool_free(mbox, phba->mbox_mem_pool);
    return status;
}
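/*
 * Sketch of the fallback flow above (illustrative): MQ_CREATE_EXT is tried
 * first so asynchronous events can be registered; only if that mailbox
 * itself fails does the driver rebuild the request with
 * lpfc_mq_create_fb_init() and retry with the legacy MQ_CREATE opcode:
 *
 *    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);    (MQ_CREATE_EXT)
 *    if (rc != MBX_SUCCESS) {
 *        lpfc_mq_create_fb_init(phba, mq, mbox, cq);
 *        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); (MQ_CREATE)
 *    }
 */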
/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba, by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @wq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. The
 * mailbox is issued in polled mode, so this function waits for the WQ_CREATE
 * command to complete before returning.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
           struct lpfc_queue *cq, uint32_t subtype)
{
    struct lpfc_mbx_wq_create *wq_create;
    struct lpfc_dmabuf *dmabuf;
    LPFC_MBOXQ_t *mbox;
    int rc, length, status = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;
    uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
    struct dma_address *page;
    void __iomem *bar_memmap_p;
    uint32_t db_offset;
    uint16_t pci_barset;
    uint8_t dpp_barset;
    uint32_t dpp_offset;
    unsigned long pg_addr;
    uint8_t wq_create_version;

    /* sanity check on queue memory */
    if (!wq || !cq)
        return -ENODEV;
    if (!phba->sli4_hba.pc_sli4_params.supported)
        hw_page_size = wq->page_size;

    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
    length = (sizeof(struct lpfc_mbx_wq_create) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
             LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
             length, LPFC_SLI4_MBX_EMBED);
    wq_create = &mbox->u.mqe.un.wq_create;
    shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
    bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
           wq->page_count);
    bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
           cq->queue_id);

    /* wqv is the earliest version supported, NOT the latest */
    bf_set(lpfc_mbox_hdr_version, &shdr->request,
           phba->sli4_hba.pc_sli4_params.wqv);

    if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
        (wq->page_size > SLI4_PAGE_SIZE))
        wq_create_version = LPFC_Q_CREATE_VERSION_1;
    else
        wq_create_version = LPFC_Q_CREATE_VERSION_0;

    if (phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT)
        wq_create_version = LPFC_Q_CREATE_VERSION_1;
    else
        wq_create_version = LPFC_Q_CREATE_VERSION_0;

    switch (wq_create_version) {
    case LPFC_Q_CREATE_VERSION_1:
        bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
               wq->entry_count);
        bf_set(lpfc_mbox_hdr_version, &shdr->request,
               LPFC_Q_CREATE_VERSION_1);

        switch (wq->entry_size) {
        default:
        case 64:
            bf_set(lpfc_mbx_wq_create_wqe_size,
                   &wq_create->u.request_1,
                   LPFC_WQ_WQE_SIZE_64);
            break;
        case 128:
            bf_set(lpfc_mbx_wq_create_wqe_size,
                   &wq_create->u.request_1,
                   LPFC_WQ_WQE_SIZE_128);
            break;
        }
        /* Request DPP by default */
        bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
        bf_set(lpfc_mbx_wq_create_page_size,
               &wq_create->u.request_1,
               (wq->page_size / SLI4_PAGE_SIZE));
        page = wq_create->u.request_1.page;
        break;
    default:
        page = wq_create->u.request.page;
        break;
    }

    list_for_each_entry(dmabuf, &wq->page_list, list) {
        memset(dmabuf->virt, 0, hw_page_size);
        page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
        page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
    }

    if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
        bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);

    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2503 WQ_CREATE mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
        goto out;
    }

    if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
        wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
                      &wq_create->u.response);
    else
        wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
                      &wq_create->u.response_1);

    if (wq->queue_id == 0xFFFF) {
        status = -ENXIO;
        goto out;
    }

    wq->db_format = LPFC_DB_LIST_FORMAT;
    if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
        if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
            wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
                           &wq_create->u.response);
            if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
                (wq->db_format != LPFC_DB_RING_FORMAT)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "3265 WQ[%d] doorbell format "
                        "not supported: x%x\n",
                        wq->queue_id, wq->db_format);
                status = -EINVAL;
                goto out;
            }
            pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
                        &wq_create->u.response);
            bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
                                   pci_barset);
            if (!bar_memmap_p) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "3263 WQ[%d] failed to memmap "
                        "pci barset:x%x\n",
                        wq->queue_id, pci_barset);
                status = -ENOMEM;
                goto out;
            }
            db_offset = wq_create->u.response.doorbell_offset;
            if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
                (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "3252 WQ[%d] doorbell offset "
                        "not supported: x%x\n",
                        wq->queue_id, db_offset);
                status = -EINVAL;
                goto out;
            }
            wq->db_regaddr = bar_memmap_p + db_offset;
            lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                    "3264 WQ[%d]: barset:x%x, offset:x%x, "
                    "format:x%x\n", wq->queue_id,
                    pci_barset, db_offset, wq->db_format);
        } else
            wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
    } else {
        /* Check if DPP was honored by the firmware */
        wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
                    &wq_create->u.response_1);
        if (wq->dpp_enable) {
            pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
                        &wq_create->u.response_1);
            bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
                                   pci_barset);
            if (!bar_memmap_p) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "3267 WQ[%d] failed to memmap "
                        "pci barset:x%x\n",
                        wq->queue_id, pci_barset);
                status = -ENOMEM;
                goto out;
            }
            db_offset = wq_create->u.response_1.doorbell_offset;
            wq->db_regaddr = bar_memmap_p + db_offset;
            wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
                        &wq_create->u.response_1);
            dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
                        &wq_create->u.response_1);
            bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
                                   dpp_barset);
            if (!bar_memmap_p) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "3268 WQ[%d] failed to memmap "
                        "pci barset:x%x\n",
                        wq->queue_id, dpp_barset);
                status = -ENOMEM;
                goto out;
            }
            dpp_offset = wq_create->u.response_1.dpp_offset;
            wq->dpp_regaddr = bar_memmap_p + dpp_offset;
            lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                    "3271 WQ[%d]: barset:x%x, offset:x%x, "
                    "dpp_id:x%x dpp_barset:x%x "
                    "dpp_offset:x%x\n",
                    wq->queue_id, pci_barset, db_offset,
                    wq->dpp_id, dpp_barset, dpp_offset);

#ifdef CONFIG_X86
            /* Enable combined writes for DPP aperture */
            pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
            rc = set_memory_wc(pg_addr, 1);
            if (rc) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                    "3272 Cannot setup Combined "
                    "Write on WQ[%d] - disable DPP\n",
                    wq->queue_id);
                phba->cfg_enable_dpp = 0;
            }
#else
            phba->cfg_enable_dpp = 0;
#endif
        } else
            wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
    }
    wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
    if (wq->pring == NULL) {
        status = -ENOMEM;
        goto out;
    }
    wq->type = LPFC_WQ;
    wq->assoc_qid = cq->queue_id;
    wq->subtype = subtype;
    wq->host_index = 0;
    wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;

    /* link the wq onto the parent cq child list */
    list_add_tail(&wq->list, &cq->child_list);
out:
    mempool_free(mbox, phba->mbox_mem_pool);
    return status;
}
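/*
 * Illustrative usage, not driver code: a work queue is always bound to an
 * existing completion queue, so a typical caller does (placeholder subtype):
 *
 *    rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
 *    if (rc)
 *        goto fail;
 *
 * On success wq->db_regaddr, and wq->dpp_regaddr when DPP is honored by the
 * firmware, are ready for doorbell writes.
 */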
/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this receive queue pair to.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba, by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @drq
 * and @hrq structs are used to get the entry count that is necessary to
 * determine the number of pages to use for this queue. The @cq is used to
 * indicate which completion queue to bind received buffers that are posted
 * to these queues to. The mailbox is issued in polled mode, so this function
 * waits for the RQ_CREATE command to complete before returning.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
           struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
    struct lpfc_mbx_rq_create *rq_create;
    struct lpfc_dmabuf *dmabuf;
    LPFC_MBOXQ_t *mbox;
    int rc, length, status = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;
    uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
    void __iomem *bar_memmap_p;
    uint32_t db_offset;
    uint16_t pci_barset;

    /* sanity check on queue memory */
    if (!hrq || !drq || !cq)
        return -ENODEV;
    if (!phba->sli4_hba.pc_sli4_params.supported)
        hw_page_size = SLI4_PAGE_SIZE;

    if (hrq->entry_count != drq->entry_count)
        return -EINVAL;
    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
    length = (sizeof(struct lpfc_mbx_rq_create) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
             LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
             length, LPFC_SLI4_MBX_EMBED);
    rq_create = &mbox->u.mqe.un.rq_create;
    shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
    bf_set(lpfc_mbox_hdr_version, &shdr->request,
           phba->sli4_hba.pc_sli4_params.rqv);
    if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
        bf_set(lpfc_rq_context_rqe_count_1,
               &rq_create->u.request.context,
               hrq->entry_count);
        rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
        bf_set(lpfc_rq_context_rqe_size,
               &rq_create->u.request.context,
               LPFC_RQE_SIZE_8);
        bf_set(lpfc_rq_context_page_size,
               &rq_create->u.request.context,
               LPFC_RQ_PAGE_SIZE_4096);
    } else {
        switch (hrq->entry_count) {
        default:
            lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                    "2535 Unsupported RQ count. (%d)\n",
                    hrq->entry_count);
            if (hrq->entry_count < 512) {
                status = -EINVAL;
                goto out;
            }
            /* fall through - otherwise default to smallest count */
        case 512:
            bf_set(lpfc_rq_context_rqe_count,
                   &rq_create->u.request.context,
                   LPFC_RQ_RING_SIZE_512);
            break;
        case 1024:
            bf_set(lpfc_rq_context_rqe_count,
                   &rq_create->u.request.context,
                   LPFC_RQ_RING_SIZE_1024);
            break;
        case 2048:
            bf_set(lpfc_rq_context_rqe_count,
                   &rq_create->u.request.context,
                   LPFC_RQ_RING_SIZE_2048);
            break;
        case 4096:
            bf_set(lpfc_rq_context_rqe_count,
                   &rq_create->u.request.context,
                   LPFC_RQ_RING_SIZE_4096);
            break;
        }
        bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
               LPFC_HDR_BUF_SIZE);
    }
    bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
           cq->queue_id);
    bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
           hrq->page_count);
    list_for_each_entry(dmabuf, &hrq->page_list, list) {
        memset(dmabuf->virt, 0, hw_page_size);
        rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
                putPaddrLow(dmabuf->phys);
        rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
                putPaddrHigh(dmabuf->phys);
    }
    if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
        bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2504 RQ_CREATE mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
        goto out;
    }
    hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
    if (hrq->queue_id == 0xFFFF) {
        status = -ENXIO;
        goto out;
    }

    if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
        hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
                    &rq_create->u.response);
        if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
            (hrq->db_format != LPFC_DB_RING_FORMAT)) {
            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                    "3262 RQ [%d] doorbell format not "
                    "supported: x%x\n", hrq->queue_id,
                    hrq->db_format);
            status = -EINVAL;
            goto out;
        }

        pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
                    &rq_create->u.response);
        bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
        if (!bar_memmap_p) {
            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                    "3269 RQ[%d] failed to memmap pci "
                    "barset:x%x\n", hrq->queue_id,
                    pci_barset);
            status = -ENOMEM;
            goto out;
        }

        db_offset = rq_create->u.response.doorbell_offset;
        if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
            (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
            lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                    "3270 RQ[%d] doorbell offset not "
                    "supported: x%x\n", hrq->queue_id,
                    db_offset);
            status = -EINVAL;
            goto out;
        }
        hrq->db_regaddr = bar_memmap_p + db_offset;
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
                "format:x%x\n", hrq->queue_id, pci_barset,
                db_offset, hrq->db_format);
    } else {
        hrq->db_format = LPFC_DB_RING_FORMAT;
        hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
    }
    hrq->type = LPFC_HRQ;
    hrq->assoc_qid = cq->queue_id;
    hrq->subtype = subtype;
    hrq->host_index = 0;
    hrq->hba_index = 0;
    hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

    /* now create the data queue */
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
             LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
             length, LPFC_SLI4_MBX_EMBED);
    bf_set(lpfc_mbox_hdr_version, &shdr->request,
           phba->sli4_hba.pc_sli4_params.rqv);
    if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
        bf_set(lpfc_rq_context_rqe_count_1,
               &rq_create->u.request.context, hrq->entry_count);
        if (subtype == LPFC_NVMET)
            rq_create->u.request.context.buffer_size =
                LPFC_NVMET_DATA_BUF_SIZE;
        else
            rq_create->u.request.context.buffer_size =
                LPFC_DATA_BUF_SIZE;
        bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
               LPFC_RQE_SIZE_8);
        bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
               (PAGE_SIZE/SLI4_PAGE_SIZE));
    } else {
        switch (drq->entry_count) {
        default:
            lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                    "2536 Unsupported RQ count. (%d)\n",
                    drq->entry_count);
            if (drq->entry_count < 512) {
                status = -EINVAL;
                goto out;
            }
            /* fall through - otherwise default to smallest count */
        case 512:
            bf_set(lpfc_rq_context_rqe_count,
                   &rq_create->u.request.context,
                   LPFC_RQ_RING_SIZE_512);
            break;
        case 1024:
            bf_set(lpfc_rq_context_rqe_count,
                   &rq_create->u.request.context,
                   LPFC_RQ_RING_SIZE_1024);
            break;
        case 2048:
            bf_set(lpfc_rq_context_rqe_count,
                   &rq_create->u.request.context,
                   LPFC_RQ_RING_SIZE_2048);
            break;
        case 4096:
            bf_set(lpfc_rq_context_rqe_count,
                   &rq_create->u.request.context,
                   LPFC_RQ_RING_SIZE_4096);
            break;
        }
        if (subtype == LPFC_NVMET)
            bf_set(lpfc_rq_context_buf_size,
                   &rq_create->u.request.context,
                   LPFC_NVMET_DATA_BUF_SIZE);
        else
            bf_set(lpfc_rq_context_buf_size,
                   &rq_create->u.request.context,
                   LPFC_DATA_BUF_SIZE);
    }
    bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
           cq->queue_id);
    bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
           drq->page_count);
    list_for_each_entry(dmabuf, &drq->page_list, list) {
        rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
                putPaddrLow(dmabuf->phys);
        rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
                putPaddrHigh(dmabuf->phys);
    }
    if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
        bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        status = -ENXIO;
        goto out;
    }
    drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
    if (drq->queue_id == 0xFFFF) {
        status = -ENXIO;
        goto out;
    }
    drq->type = LPFC_DRQ;
    drq->assoc_qid = cq->queue_id;
    drq->subtype = subtype;
    drq->host_index = 0;
    drq->hba_index = 0;
    drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

    /* link the header and data RQs onto the parent cq child list */
    list_add_tail(&hrq->list, &cq->child_list);
    list_add_tail(&drq->list, &cq->child_list);

out:
    mempool_free(mbox, phba->mbox_mem_pool);
    return status;
}
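/*
 * Illustrative usage, not driver code: header and data RQs are created as a
 * pair bound to one CQ; the unsolicited-receive path, for example, looks
 * roughly like:
 *
 *    rc = lpfc_rq_create(phba, hdr_rq, dat_rq, els_cq, LPFC_USOL);
 *
 * where hdr_rq, dat_rq and els_cq stand in for the driver's actual queue
 * pointers.
 */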
/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive queues.
 * @drqp: The queue structure array to use to create the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: The subtype of the receive queues.
 *
 * This function creates a set of receive buffer queue pairs, as detailed in
 * @hrqp and @drqp, on a port, described by @phba, by sending a RQ_CREATE
 * mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @drqp
 * and @hrqp arrays are used to get the entry counts that are necessary to
 * determine the number of pages to use for each queue. The @cqp array
 * indicates which completion queue to bind received buffers that are posted
 * to these queues to. The mailbox is issued in polled mode, so this function
 * waits for the RQ_CREATE command to complete before returning.
 *
 * On success this function will return zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
        struct lpfc_queue **drqp, struct lpfc_queue **cqp,
        uint32_t subtype)
{
    struct lpfc_queue *hrq, *drq, *cq;
    struct lpfc_mbx_rq_create_v2 *rq_create;
    struct lpfc_dmabuf *dmabuf;
    LPFC_MBOXQ_t *mbox;
    int rc, length, alloclen, status = 0;
    int cnt, idx, numrq, page_idx = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;
    uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

    numrq = phba->cfg_nvmet_mrq;
    /* sanity check on array memory */
    if (!hrqp || !drqp || !cqp || !numrq)
        return -ENODEV;
    if (!phba->sli4_hba.pc_sli4_params.supported)
        hw_page_size = SLI4_PAGE_SIZE;

    mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;

    length = sizeof(struct lpfc_mbx_rq_create_v2);
    length += ((2 * numrq * hrqp[0]->page_count) *
           sizeof(struct dma_address));

    alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
                    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
                    LPFC_SLI4_MBX_NEMBED);
    if (alloclen < length) {
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                "3099 Allocated DMA memory size (%d) is "
                "less than the requested DMA memory size "
                "(%d)\n", alloclen, length);
        status = -ENOMEM;
        goto out;
    }

    rq_create = mbox->sge_array->addr[0];
    shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

    bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);

    for (idx = 0; idx < numrq; idx++) {
        hrq = hrqp[idx];
        drq = drqp[idx];
        cq  = cqp[idx];

        /* sanity check on queue memory */
        if (!hrq || !drq || !cq) {
            status = -ENODEV;
            goto out;
        }

        if (hrq->entry_count != drq->entry_count) {
            status = -EINVAL;
            goto out;
        }

        if (idx == 0) {
            bf_set(lpfc_mbx_rq_create_num_pages,
                   &rq_create->u.request,
                   hrq->page_count);
            bf_set(lpfc_mbx_rq_create_rq_cnt,
                   &rq_create->u.request, (numrq * 2));
            bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
                   1);
            bf_set(lpfc_rq_context_base_cq,
                   &rq_create->u.request.context,
                   cq->queue_id);
            bf_set(lpfc_rq_context_data_size,
                   &rq_create->u.request.context,
                   LPFC_NVMET_DATA_BUF_SIZE);
            bf_set(lpfc_rq_context_hdr_size,
                   &rq_create->u.request.context,
                   LPFC_HDR_BUF_SIZE);
            bf_set(lpfc_rq_context_rqe_count_1,
                   &rq_create->u.request.context,
                   hrq->entry_count);
            bf_set(lpfc_rq_context_rqe_size,
                   &rq_create->u.request.context,
                   LPFC_RQE_SIZE_8);
            bf_set(lpfc_rq_context_page_size,
                   &rq_create->u.request.context,
                   (PAGE_SIZE/SLI4_PAGE_SIZE));
        }
        rc = 0;
        list_for_each_entry(dmabuf, &hrq->page_list, list) {
            memset(dmabuf->virt, 0, hw_page_size);
            cnt = page_idx + dmabuf->buffer_tag;
            rq_create->u.request.page[cnt].addr_lo =
                    putPaddrLow(dmabuf->phys);
            rq_create->u.request.page[cnt].addr_hi =
                    putPaddrHigh(dmabuf->phys);
            rc++;
        }
        page_idx += rc;

        rc = 0;
        list_for_each_entry(dmabuf, &drq->page_list, list) {
            memset(dmabuf->virt, 0, hw_page_size);
            cnt = page_idx + dmabuf->buffer_tag;
            rq_create->u.request.page[cnt].addr_lo =
                    putPaddrLow(dmabuf->phys);
            rq_create->u.request.page[cnt].addr_hi =
                    putPaddrHigh(dmabuf->phys);
            rc++;
        }
        page_idx += rc;

        hrq->db_format = LPFC_DB_RING_FORMAT;
        hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
        hrq->type = LPFC_HRQ;
        hrq->assoc_qid = cq->queue_id;
        hrq->subtype = subtype;
        hrq->host_index = 0;
        hrq->hba_index = 0;
        hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

        drq->db_format = LPFC_DB_RING_FORMAT;
        drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
        drq->type = LPFC_DRQ;
        drq->assoc_qid = cq->queue_id;
        drq->subtype = subtype;
        drq->host_index = 0;
        drq->hba_index = 0;
        drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

        list_add_tail(&hrq->list, &cq->child_list);
        list_add_tail(&drq->list, &cq->child_list);
    }

    rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "3120 RQ_CREATE mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
        goto out;
    }
    rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
    if (rc == 0xFFFF) {
        status = -ENXIO;
        goto out;
    }

    /* Initialize all RQs with associated queue id */
    for (idx = 0; idx < numrq; idx++) {
        hrq = hrqp[idx];
        hrq->queue_id = rc + (2 * idx);
        drq = drqp[idx];
        drq->queue_id = rc + (2 * idx) + 1;
    }

out:
    lpfc_sli4_mbox_cmd_free(phba, mbox);
    return status;
}
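/*
 * The MRQ set shares one RQ_CREATE response: queue ids are handed out in
 * header/data pairs from the returned base id (hrq = base + 2*idx,
 * drq = base + 2*idx + 1 above). With two MRQs and a base id of 100, the
 * pairs are (100, 101) and (102, 103).
 */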
/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
    LPFC_MBOXQ_t *mbox;
    int rc, length, status = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;

    /* sanity check on queue memory */
    if (!eq)
        return -ENODEV;

    mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
    length = (sizeof(struct lpfc_mbx_eq_destroy) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
             LPFC_MBOX_OPCODE_EQ_DESTROY,
             length, LPFC_SLI4_MBX_EMBED);
    bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
           eq->queue_id);
    mbox->vport = eq->phba->pport;
    mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

    rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr = (union lpfc_sli4_cfg_shdr *)
        &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2505 EQ_DESTROY mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
    }

    /* Remove eq from any list */
    list_del_init(&eq->list);
    mempool_free(mbox, eq->phba->mbox_mem_pool);
    return status;
}
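/*
 * The *_destroy helpers that follow all share this shape: build the
 * type-specific DESTROY opcode with lpfc_sli4_config(), poll the mailbox,
 * check the subheader status, then unlink the queue from its parent's
 * child_list. Teardown is therefore expected to run leaf-first (WQs and RQs,
 * then CQs, then EQs), mirroring how the queues were linked at create time.
 */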
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
    LPFC_MBOXQ_t *mbox;
    int rc, length, status = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;

    /* sanity check on queue memory */
    if (!cq)
        return -ENODEV;
    mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
    length = (sizeof(struct lpfc_mbx_cq_destroy) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
             LPFC_MBOX_OPCODE_CQ_DESTROY,
             length, LPFC_SLI4_MBX_EMBED);
    bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
           cq->queue_id);
    mbox->vport = cq->phba->pport;
    mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
    rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr = (union lpfc_sli4_cfg_shdr *)
        &mbox->u.mqe.un.wq_create.header.cfg_shdr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2506 CQ_DESTROY mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
    }
    /* Remove cq from any list */
    list_del_init(&cq->list);
    mempool_free(mbox, cq->phba->mbox_mem_pool);
    return status;
}
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
    LPFC_MBOXQ_t *mbox;
    int rc, length, status = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;

    /* sanity check on queue memory */
    if (!mq)
        return -ENODEV;
    mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
    length = (sizeof(struct lpfc_mbx_mq_destroy) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
             LPFC_MBOX_OPCODE_MQ_DESTROY,
             length, LPFC_SLI4_MBX_EMBED);
    bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
           mq->queue_id);
    mbox->vport = mq->phba->pport;
    mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
    rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr = (union lpfc_sli4_cfg_shdr *)
        &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2507 MQ_DESTROY mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
    }
    /* Remove mq from any list */
    list_del_init(&mq->list);
    mempool_free(mbox, mq->phba->mbox_mem_pool);
    return status;
}
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
    LPFC_MBOXQ_t *mbox;
    int rc, length, status = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;

    /* sanity check on queue memory */
    if (!wq)
        return -ENODEV;
    mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
    length = (sizeof(struct lpfc_mbx_wq_destroy) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
             LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
             length, LPFC_SLI4_MBX_EMBED);
    bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
           wq->queue_id);
    mbox->vport = wq->phba->pport;
    mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
    rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
    shdr = (union lpfc_sli4_cfg_shdr *)
        &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2508 WQ_DESTROY mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
    }
    /* Remove wq from any list */
    list_del_init(&wq->list);
    mempool_free(mbox, wq->phba->mbox_mem_pool);
    return status;
}
/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @hrq: The queue structure associated with the header receive queue to destroy.
 * @drq: The queue structure associated with the data receive queue to destroy.
 *
 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
 * by sending a mailbox command, specific to the type of queue, to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
        struct lpfc_queue *drq)
{
    LPFC_MBOXQ_t *mbox;
    int rc, length, status = 0;
    uint32_t shdr_status, shdr_add_status;
    union lpfc_sli4_cfg_shdr *shdr;

    /* sanity check on queue memory */
    if (!hrq || !drq)
        return -ENODEV;
    mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
        return -ENOMEM;
    length = (sizeof(struct lpfc_mbx_rq_destroy) -
          sizeof(struct lpfc_sli4_cfg_mhdr));
    lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
             LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
             length, LPFC_SLI4_MBX_EMBED);
    bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
           hrq->queue_id);
    mbox->vport = hrq->phba->pport;
    mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
    rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
    /* The IOCTL status is embedded in the mailbox subheader. */
    shdr = (union lpfc_sli4_cfg_shdr *)
        &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2509 RQ_DESTROY mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        if (rc != MBX_TIMEOUT)
            mempool_free(mbox, hrq->phba->mbox_mem_pool);
        return -ENXIO;
    }
    bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
           drq->queue_id);
    rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
    shdr = (union lpfc_sli4_cfg_shdr *)
        &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
    shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
    shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
    if (shdr_status || shdr_add_status || rc) {
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "2510 RQ_DESTROY mailbox failed with "
                "status x%x add_status x%x, mbx status x%x\n",
                shdr_status, shdr_add_status, rc);
        status = -ENXIO;
    }
    list_del_init(&hrq->list);
    list_del_init(&drq->list);
    mempool_free(mbox, hrq->phba->mbox_mem_pool);
    return status;
}
16591 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
16592 * @phba: The virtual port for which this call being executed.
16593 * @pdma_phys_addr0: Physical address of the 1st SGL page.
16594 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
16595 * @xritag: the xritag that ties this io to the SGL pages.
16597 * This routine will post the sgl pages for the IO that has the xritag
16598 * that is in the iocbq structure. The xritag is assigned during iocbq
16599 * creation and persists for as long as the driver is loaded.
16600 * if the caller has fewer than 256 scatter gather segments to map then
16601 * pdma_phys_addr1 should be 0.
16602 * If the caller needs to map more than 256 scatter gather segment then
16603 * pdma_phys_addr1 should be a valid physical address.
16604 * physical address for SGLs must be 64 byte aligned.
16605 * If you are going to map 2 SGL's then the first one must have 256 entries
16606 * the second sgl can have between 1 and 256 entries.
16610 * -ENXIO, -ENOMEM - Failure
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			 sizeof(struct lpfc_mbx_post_sgl_pages) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		return -ENXIO;
	}
	return 0;
}
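/*
 * Illustrative usage sketch (not compiled): a caller that has allocated a
 * buffer and its xritag would typically post the SGL page for that exchange
 * as shown below. The local names (sglq, prc) are hypothetical; only
 * lpfc_sli4_post_sgl() and NO_XRI come from this file.
 */
#if 0
	struct lpfc_sglq *sglq;		/* hypothetical SGL queue entry */
	int prc;

	if (sglq->sli4_xritag != NO_XRI) {
		/* second page address is 0 when a single SGL page suffices */
		prc = lpfc_sli4_post_sgl(phba, sglq->phys, 0,
					 sglq->sli4_xritag);
		if (prc)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"xxxx post_sgl failed rc x%x\n", prc);
	}
#endif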
/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available logical xri
 * from the driver's xri bitmask, consistent with the SLI-4 interface spec.
 *
 * Returns
 *	The allocated logical xri if successful.
 *	NO_XRI if the xri bitmask is exhausted.
 **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	uint16_t xri;

	/*
	 * Fetch the next logical xri. Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	}
	set_bit(xri, phba->sli4_hba.xri_bmask);
	phba->sli4_hba.max_cfg_param.xri_used++;
	spin_unlock_irq(&phba->hbalock);
	return xri;
}
/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver. Callers are expected to
 * hold the hbalock; lpfc_sli4_free_xri() provides a locked wrapper.
 **/
void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}
/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: the xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it logs the failure and returns NO_XRI (0xffff); otherwise it returns
 * the newly allocated xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI.last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}
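/*
 * Illustrative sketch (not compiled): the XRI bitmask allocator above is
 * meant to be paired with lpfc_sli4_free_xri() on teardown or error paths.
 * The surrounding code is hypothetical; only the alloc/free calls and
 * NO_XRI are taken from this file.
 */
#if 0
	uint16_t xri;

	xri = lpfc_sli4_alloc_xri(phba);
	if (xri == NO_XRI)
		return -ENOMEM;		/* bitmask exhausted */
	/* ... use the xri for an exchange ... */
	lpfc_sli4_free_xri(phba, xri);	/* clears the bit under hbalock */
#endif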
/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of the driver's sgl pages to the
 * HBA using a non-embedded mailbox command. No lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
16779 lpfc_sli4_post_sgl_list(struct lpfc_hba
*phba
,
16780 struct list_head
*post_sgl_list
,
16783 struct lpfc_sglq
*sglq_entry
= NULL
, *sglq_next
= NULL
;
16784 struct lpfc_mbx_post_uembed_sgl_page1
*sgl
;
16785 struct sgl_page_pairs
*sgl_pg_pairs
;
16787 LPFC_MBOXQ_t
*mbox
;
16788 uint32_t reqlen
, alloclen
, pg_pairs
;
16790 uint16_t xritag_start
= 0;
16792 uint32_t shdr_status
, shdr_add_status
;
16793 union lpfc_sli4_cfg_shdr
*shdr
;
16795 reqlen
= post_cnt
* sizeof(struct sgl_page_pairs
) +
16796 sizeof(union lpfc_sli4_cfg_shdr
) + sizeof(uint32_t);
16797 if (reqlen
> SLI4_PAGE_SIZE
) {
16798 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
16799 "2559 Block sgl registration required DMA "
16800 "size (%d) great than a page\n", reqlen
);
16804 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
16808 /* Allocate DMA memory and set up the non-embedded mailbox command */
16809 alloclen
= lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_FCOE
,
16810 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES
, reqlen
,
16811 LPFC_SLI4_MBX_NEMBED
);
16813 if (alloclen
< reqlen
) {
16814 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
16815 "0285 Allocated DMA memory size (%d) is "
16816 "less than the requested DMA memory "
16817 "size (%d)\n", alloclen
, reqlen
);
16818 lpfc_sli4_mbox_cmd_free(phba
, mbox
);
16821 /* Set up the SGL pages in the non-embedded DMA pages */
16822 viraddr
= mbox
->sge_array
->addr
[0];
16823 sgl
= (struct lpfc_mbx_post_uembed_sgl_page1
*)viraddr
;
16824 sgl_pg_pairs
= &sgl
->sgl_pg_pairs
;
16827 list_for_each_entry_safe(sglq_entry
, sglq_next
, post_sgl_list
, list
) {
16828 /* Set up the sge entry */
16829 sgl_pg_pairs
->sgl_pg0_addr_lo
=
16830 cpu_to_le32(putPaddrLow(sglq_entry
->phys
));
16831 sgl_pg_pairs
->sgl_pg0_addr_hi
=
16832 cpu_to_le32(putPaddrHigh(sglq_entry
->phys
));
16833 sgl_pg_pairs
->sgl_pg1_addr_lo
=
16834 cpu_to_le32(putPaddrLow(0));
16835 sgl_pg_pairs
->sgl_pg1_addr_hi
=
16836 cpu_to_le32(putPaddrHigh(0));
16838 /* Keep the first xritag on the list */
16840 xritag_start
= sglq_entry
->sli4_xritag
;
16845 /* Complete initialization and perform endian conversion. */
16846 bf_set(lpfc_post_sgl_pages_xri
, sgl
, xritag_start
);
16847 bf_set(lpfc_post_sgl_pages_xricnt
, sgl
, post_cnt
);
16848 sgl
->word0
= cpu_to_le32(sgl
->word0
);
16850 if (!phba
->sli4_hba
.intr_enable
)
16851 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
16853 mbox_tmo
= lpfc_mbox_tmo_val(phba
, mbox
);
16854 rc
= lpfc_sli_issue_mbox_wait(phba
, mbox
, mbox_tmo
);
16856 shdr
= (union lpfc_sli4_cfg_shdr
*) &sgl
->cfg_shdr
;
16857 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
16858 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
16859 if (rc
!= MBX_TIMEOUT
)
16860 lpfc_sli4_mbox_cmd_free(phba
, mbox
);
16861 if (shdr_status
|| shdr_add_status
|| rc
) {
16862 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
16863 "2513 POST_SGL_BLOCK mailbox command failed "
16864 "status x%x add_status x%x mbx status x%x\n",
16865 shdr_status
, shdr_add_status
, rc
);
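/*
 * Sizing note (illustrative): the non-embedded POST_SGL_PAGES request must
 * fit in a single SLI4 page, so the number of sgl page pairs per mailbox is
 * bounded roughly by
 *
 *	max_pairs ~= (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr)
 *		      - sizeof(uint32_t)) / sizeof(struct sgl_page_pairs)
 *
 * which is why the routine above rejects any reqlen larger than
 * SLI4_PAGE_SIZE before building the command.
 */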
/**
 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl pages to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to nvme buffer list.
 * @count: number of nvme buffers on the list.
 *
 * This routine is invoked to post a block of @count nvme sgl pages from the
 * nvme buffer list @nblist to the HBA using a non-embedded mailbox command.
 **/
16883 lpfc_sli4_post_io_sgl_block(struct lpfc_hba
*phba
, struct list_head
*nblist
,
16886 struct lpfc_io_buf
*lpfc_ncmd
;
16887 struct lpfc_mbx_post_uembed_sgl_page1
*sgl
;
16888 struct sgl_page_pairs
*sgl_pg_pairs
;
16890 LPFC_MBOXQ_t
*mbox
;
16891 uint32_t reqlen
, alloclen
, pg_pairs
;
16893 uint16_t xritag_start
= 0;
16895 uint32_t shdr_status
, shdr_add_status
;
16896 dma_addr_t pdma_phys_bpl1
;
16897 union lpfc_sli4_cfg_shdr
*shdr
;
16899 /* Calculate the requested length of the dma memory */
16900 reqlen
= count
* sizeof(struct sgl_page_pairs
) +
16901 sizeof(union lpfc_sli4_cfg_shdr
) + sizeof(uint32_t);
16902 if (reqlen
> SLI4_PAGE_SIZE
) {
16903 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
16904 "6118 Block sgl registration required DMA "
16905 "size (%d) great than a page\n", reqlen
);
16908 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
16910 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
16911 "6119 Failed to allocate mbox cmd memory\n");
16915 /* Allocate DMA memory and set up the non-embedded mailbox command */
16916 alloclen
= lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_FCOE
,
16917 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES
,
16918 reqlen
, LPFC_SLI4_MBX_NEMBED
);
16920 if (alloclen
< reqlen
) {
16921 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
16922 "6120 Allocated DMA memory size (%d) is "
16923 "less than the requested DMA memory "
16924 "size (%d)\n", alloclen
, reqlen
);
16925 lpfc_sli4_mbox_cmd_free(phba
, mbox
);
16929 /* Get the first SGE entry from the non-embedded DMA memory */
16930 viraddr
= mbox
->sge_array
->addr
[0];
16932 /* Set up the SGL pages in the non-embedded DMA pages */
16933 sgl
= (struct lpfc_mbx_post_uembed_sgl_page1
*)viraddr
;
16934 sgl_pg_pairs
= &sgl
->sgl_pg_pairs
;
16937 list_for_each_entry(lpfc_ncmd
, nblist
, list
) {
16938 /* Set up the sge entry */
16939 sgl_pg_pairs
->sgl_pg0_addr_lo
=
16940 cpu_to_le32(putPaddrLow(lpfc_ncmd
->dma_phys_sgl
));
16941 sgl_pg_pairs
->sgl_pg0_addr_hi
=
16942 cpu_to_le32(putPaddrHigh(lpfc_ncmd
->dma_phys_sgl
));
16943 if (phba
->cfg_sg_dma_buf_size
> SGL_PAGE_SIZE
)
16944 pdma_phys_bpl1
= lpfc_ncmd
->dma_phys_sgl
+
16947 pdma_phys_bpl1
= 0;
16948 sgl_pg_pairs
->sgl_pg1_addr_lo
=
16949 cpu_to_le32(putPaddrLow(pdma_phys_bpl1
));
16950 sgl_pg_pairs
->sgl_pg1_addr_hi
=
16951 cpu_to_le32(putPaddrHigh(pdma_phys_bpl1
));
16952 /* Keep the first xritag on the list */
16954 xritag_start
= lpfc_ncmd
->cur_iocbq
.sli4_xritag
;
16958 bf_set(lpfc_post_sgl_pages_xri
, sgl
, xritag_start
);
16959 bf_set(lpfc_post_sgl_pages_xricnt
, sgl
, pg_pairs
);
16960 /* Perform endian conversion if necessary */
16961 sgl
->word0
= cpu_to_le32(sgl
->word0
);
16963 if (!phba
->sli4_hba
.intr_enable
) {
16964 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
16966 mbox_tmo
= lpfc_mbox_tmo_val(phba
, mbox
);
16967 rc
= lpfc_sli_issue_mbox_wait(phba
, mbox
, mbox_tmo
);
16969 shdr
= (union lpfc_sli4_cfg_shdr
*)&sgl
->cfg_shdr
;
16970 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
16971 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
16972 if (rc
!= MBX_TIMEOUT
)
16973 lpfc_sli4_mbox_cmd_free(phba
, mbox
);
16974 if (shdr_status
|| shdr_add_status
|| rc
) {
16975 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
16976 "6125 POST_SGL_BLOCK mailbox command failed "
16977 "status x%x add_status x%x mbx status x%x\n",
16978 shdr_status
, shdr_add_status
, rc
);
/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers on the list.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post them to the
 * port. For a single NVME buffer sgl with a non-contiguous xri, if any, it
 * uses the embedded SGL post mailbox command instead. The @post_nblist
 * passed in must be a local list, so no lock is needed while manipulating
 * the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
16999 lpfc_sli4_post_io_sgl_list(struct lpfc_hba
*phba
,
17000 struct list_head
*post_nblist
, int sb_count
)
17002 struct lpfc_io_buf
*lpfc_ncmd
, *lpfc_ncmd_next
;
17003 int status
, sgl_size
;
17004 int post_cnt
= 0, block_cnt
= 0, num_posting
= 0, num_posted
= 0;
17005 dma_addr_t pdma_phys_sgl1
;
17006 int last_xritag
= NO_XRI
;
17008 LIST_HEAD(prep_nblist
);
17009 LIST_HEAD(blck_nblist
);
17010 LIST_HEAD(nvme_nblist
);
17016 sgl_size
= phba
->cfg_sg_dma_buf_size
;
17017 list_for_each_entry_safe(lpfc_ncmd
, lpfc_ncmd_next
, post_nblist
, list
) {
17018 list_del_init(&lpfc_ncmd
->list
);
17020 if ((last_xritag
!= NO_XRI
) &&
17021 (lpfc_ncmd
->cur_iocbq
.sli4_xritag
!= last_xritag
+ 1)) {
17022 /* a hole in xri block, form a sgl posting block */
17023 list_splice_init(&prep_nblist
, &blck_nblist
);
17024 post_cnt
= block_cnt
- 1;
17025 /* prepare list for next posting block */
17026 list_add_tail(&lpfc_ncmd
->list
, &prep_nblist
);
17029 /* prepare list for next posting block */
17030 list_add_tail(&lpfc_ncmd
->list
, &prep_nblist
);
17031 /* enough sgls for non-embed sgl mbox command */
17032 if (block_cnt
== LPFC_NEMBED_MBOX_SGL_CNT
) {
17033 list_splice_init(&prep_nblist
, &blck_nblist
);
17034 post_cnt
= block_cnt
;
17039 last_xritag
= lpfc_ncmd
->cur_iocbq
.sli4_xritag
;
17041 /* end of repost sgl list condition for NVME buffers */
17042 if (num_posting
== sb_count
) {
17043 if (post_cnt
== 0) {
17044 /* last sgl posting block */
17045 list_splice_init(&prep_nblist
, &blck_nblist
);
17046 post_cnt
= block_cnt
;
17047 } else if (block_cnt
== 1) {
17048 /* last single sgl with non-contiguous xri */
17049 if (sgl_size
> SGL_PAGE_SIZE
)
17051 lpfc_ncmd
->dma_phys_sgl
+
17054 pdma_phys_sgl1
= 0;
17055 cur_xritag
= lpfc_ncmd
->cur_iocbq
.sli4_xritag
;
17056 status
= lpfc_sli4_post_sgl(
17057 phba
, lpfc_ncmd
->dma_phys_sgl
,
17058 pdma_phys_sgl1
, cur_xritag
);
17060 /* Post error. Buffer unavailable. */
17061 lpfc_ncmd
->flags
|=
17062 LPFC_SBUF_NOT_POSTED
;
				/* Post success. Buffer available. */
17065 lpfc_ncmd
->flags
&=
17066 ~LPFC_SBUF_NOT_POSTED
;
17067 lpfc_ncmd
->status
= IOSTAT_SUCCESS
;
17070 /* success, put on NVME buffer sgl list */
17071 list_add_tail(&lpfc_ncmd
->list
, &nvme_nblist
);
17075 /* continue until a nembed page worth of sgls */
17079 /* post block of NVME buffer list sgls */
17080 status
= lpfc_sli4_post_io_sgl_block(phba
, &blck_nblist
,
17083 /* don't reset xirtag due to hole in xri block */
17084 if (block_cnt
== 0)
17085 last_xritag
= NO_XRI
;
17087 /* reset NVME buffer post count for next round of posting */
17090 /* put posted NVME buffer-sgl posted on NVME buffer sgl list */
17091 while (!list_empty(&blck_nblist
)) {
17092 list_remove_head(&blck_nblist
, lpfc_ncmd
,
17093 struct lpfc_io_buf
, list
);
17095 /* Post error. Mark buffer unavailable. */
17096 lpfc_ncmd
->flags
|= LPFC_SBUF_NOT_POSTED
;
17098 /* Post success, Mark buffer available. */
17099 lpfc_ncmd
->flags
&= ~LPFC_SBUF_NOT_POSTED
;
17100 lpfc_ncmd
->status
= IOSTAT_SUCCESS
;
17103 list_add_tail(&lpfc_ncmd
->list
, &nvme_nblist
);
17106 /* Push NVME buffers with sgl posted to the available list */
17107 lpfc_io_buf_replenish(phba
, &nvme_nblist
);
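/*
 * Illustrative sketch (not compiled): callers hand lpfc_sli4_post_io_sgl_list()
 * a private list of freshly allocated lpfc_io_buf entries and get back the
 * number actually posted; buffers that could not be posted keep the
 * LPFC_SBUF_NOT_POSTED flag. The list and count names below are hypothetical.
 */
#if 0
	LIST_HEAD(post_nblist);		/* local list, so no lock is needed */
	int allocated, num_posted;

	/* ... add newly allocated lpfc_io_buf entries to post_nblist ... */
	num_posted = lpfc_sli4_post_io_sgl_list(phba, &post_nblist, allocated);
	/* num_posted == 0 means nothing was posted; buffers were flagged */
#endif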
17113 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
17114 * @phba: pointer to lpfc_hba struct that the frame was received on
17115 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17117 * This function checks the fields in the @fc_hdr to see if the FC frame is a
17118 * valid type of frame that the LPFC driver will handle. This function will
17119 * return a zero if the frame is a valid frame or a non zero value when the
17120 * frame does not pass the check.
17123 lpfc_fc_frame_check(struct lpfc_hba
*phba
, struct fc_frame_header
*fc_hdr
)
17125 /* make rctl_names static to save stack space */
17126 struct fc_vft_header
*fc_vft_hdr
;
17127 uint32_t *header
= (uint32_t *) fc_hdr
;
17129 #define FC_RCTL_MDS_DIAGS 0xF4
17131 switch (fc_hdr
->fh_r_ctl
) {
17132 case FC_RCTL_DD_UNCAT
: /* uncategorized information */
17133 case FC_RCTL_DD_SOL_DATA
: /* solicited data */
17134 case FC_RCTL_DD_UNSOL_CTL
: /* unsolicited control */
17135 case FC_RCTL_DD_SOL_CTL
: /* solicited control or reply */
17136 case FC_RCTL_DD_UNSOL_DATA
: /* unsolicited data */
17137 case FC_RCTL_DD_DATA_DESC
: /* data descriptor */
17138 case FC_RCTL_DD_UNSOL_CMD
: /* unsolicited command */
17139 case FC_RCTL_DD_CMD_STATUS
: /* command status */
17140 case FC_RCTL_ELS_REQ
: /* extended link services request */
17141 case FC_RCTL_ELS_REP
: /* extended link services reply */
17142 case FC_RCTL_ELS4_REQ
: /* FC-4 ELS request */
17143 case FC_RCTL_ELS4_REP
: /* FC-4 ELS reply */
17144 case FC_RCTL_BA_NOP
: /* basic link service NOP */
17145 case FC_RCTL_BA_ABTS
: /* basic link service abort */
17146 case FC_RCTL_BA_RMC
: /* remove connection */
17147 case FC_RCTL_BA_ACC
: /* basic accept */
17148 case FC_RCTL_BA_RJT
: /* basic reject */
17149 case FC_RCTL_BA_PRMT
:
17150 case FC_RCTL_ACK_1
: /* acknowledge_1 */
17151 case FC_RCTL_ACK_0
: /* acknowledge_0 */
17152 case FC_RCTL_P_RJT
: /* port reject */
17153 case FC_RCTL_F_RJT
: /* fabric reject */
17154 case FC_RCTL_P_BSY
: /* port busy */
17155 case FC_RCTL_F_BSY
: /* fabric busy to data frame */
17156 case FC_RCTL_F_BSYL
: /* fabric busy to link control frame */
17157 case FC_RCTL_LCR
: /* link credit reset */
17158 case FC_RCTL_MDS_DIAGS
: /* MDS Diagnostics */
17159 case FC_RCTL_END
: /* end */
17161 case FC_RCTL_VFTH
: /* Virtual Fabric tagging Header */
17162 fc_vft_hdr
= (struct fc_vft_header
*)fc_hdr
;
17163 fc_hdr
= &((struct fc_frame_header
*)fc_vft_hdr
)[1];
17164 return lpfc_fc_frame_check(phba
, fc_hdr
);
17169 switch (fc_hdr
->fh_type
) {
17182 lpfc_printf_log(phba
, KERN_INFO
, LOG_ELS
,
17183 "2538 Received frame rctl:x%x, type:x%x, "
17184 "frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
17185 fc_hdr
->fh_r_ctl
, fc_hdr
->fh_type
,
17186 be32_to_cpu(header
[0]), be32_to_cpu(header
[1]),
17187 be32_to_cpu(header
[2]), be32_to_cpu(header
[3]),
17188 be32_to_cpu(header
[4]), be32_to_cpu(header
[5]),
17189 be32_to_cpu(header
[6]));
17192 lpfc_printf_log(phba
, KERN_WARNING
, LOG_ELS
,
17193 "2539 Dropped frame rctl:x%x type:x%x\n",
17194 fc_hdr
->fh_r_ctl
, fc_hdr
->fh_type
);
/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if one exists
 * or 0 if no VSAN Header exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}
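/*
 * Worked example (illustrative): for a frame that carries a Virtual Fabric
 * Tagging header, fh_r_ctl is FC_RCTL_VFTH and the VF_ID extracted here is
 * what lpfc_fc_frame_to_vport() below compares against each vport's vfi;
 * for an untagged frame the helper simply returns 0. The local name 'hdr'
 * is hypothetical.
 */
#if 0
	uint32_t vfi = lpfc_fc_hdr_get_vfi(hdr);	/* 0 when no VFT header */
#endif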
17217 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
17218 * @phba: Pointer to the HBA structure to search for the vport on
17219 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
17220 * @fcfi: The FC Fabric ID that the frame came from
17222 * This function searches the @phba for a vport that matches the content of the
17223 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
17224 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
17225 * returns the matching vport pointer or NULL if unable to match frame to a
17228 static struct lpfc_vport
*
17229 lpfc_fc_frame_to_vport(struct lpfc_hba
*phba
, struct fc_frame_header
*fc_hdr
,
17230 uint16_t fcfi
, uint32_t did
)
17232 struct lpfc_vport
**vports
;
17233 struct lpfc_vport
*vport
= NULL
;
17236 if (did
== Fabric_DID
)
17237 return phba
->pport
;
17238 if ((phba
->pport
->fc_flag
& FC_PT2PT
) &&
17239 !(phba
->link_state
== LPFC_HBA_READY
))
17240 return phba
->pport
;
17242 vports
= lpfc_create_vport_work_array(phba
);
17243 if (vports
!= NULL
) {
17244 for (i
= 0; i
<= phba
->max_vpi
&& vports
[i
] != NULL
; i
++) {
17245 if (phba
->fcf
.fcfi
== fcfi
&&
17246 vports
[i
]->vfi
== lpfc_fc_hdr_get_vfi(fc_hdr
) &&
17247 vports
[i
]->fc_myDID
== did
) {
17253 lpfc_destroy_vport_work_array(phba
, vports
);
/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}
17283 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
17284 * @vport: The vport that the received sequences were sent to.
17286 * This function cleans up all outstanding received sequences. This is called
17287 * by the driver when a link event or user action invalidates all the received
17291 lpfc_cleanup_rcv_buffers(struct lpfc_vport
*vport
)
17293 struct lpfc_dmabuf
*h_buf
, *hnext
;
17294 struct lpfc_dmabuf
*d_buf
, *dnext
;
17295 struct hbq_dmabuf
*dmabuf
= NULL
;
17297 /* start with the oldest sequence on the rcv list */
17298 list_for_each_entry_safe(h_buf
, hnext
, &vport
->rcv_buffer_list
, list
) {
17299 dmabuf
= container_of(h_buf
, struct hbq_dmabuf
, hbuf
);
17300 list_del_init(&dmabuf
->hbuf
.list
);
17301 list_for_each_entry_safe(d_buf
, dnext
,
17302 &dmabuf
->dbuf
.list
, list
) {
17303 list_del_init(&d_buf
->list
);
17304 lpfc_in_buf_free(vport
->phba
, d_buf
);
17306 lpfc_in_buf_free(vport
->phba
, &dmabuf
->dbuf
);
17311 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
17312 * @vport: The vport that the received sequences were sent to.
17314 * This function determines whether any received sequences have timed out by
17315 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
17316 * indicates that there is at least one timed out sequence this routine will
17317 * go through the received sequences one at a time from most inactive to most
17318 * active to determine which ones need to be cleaned up. Once it has determined
17319 * that a sequence needs to be cleaned up it will simply free up the resources
17320 * without sending an abort.
17323 lpfc_rcv_seq_check_edtov(struct lpfc_vport
*vport
)
17325 struct lpfc_dmabuf
*h_buf
, *hnext
;
17326 struct lpfc_dmabuf
*d_buf
, *dnext
;
17327 struct hbq_dmabuf
*dmabuf
= NULL
;
17328 unsigned long timeout
;
17329 int abort_count
= 0;
17331 timeout
= (msecs_to_jiffies(vport
->phba
->fc_edtov
) +
17332 vport
->rcv_buffer_time_stamp
);
17333 if (list_empty(&vport
->rcv_buffer_list
) ||
17334 time_before(jiffies
, timeout
))
17336 /* start with the oldest sequence on the rcv list */
17337 list_for_each_entry_safe(h_buf
, hnext
, &vport
->rcv_buffer_list
, list
) {
17338 dmabuf
= container_of(h_buf
, struct hbq_dmabuf
, hbuf
);
17339 timeout
= (msecs_to_jiffies(vport
->phba
->fc_edtov
) +
17340 dmabuf
->time_stamp
);
17341 if (time_before(jiffies
, timeout
))
17344 list_del_init(&dmabuf
->hbuf
.list
);
17345 list_for_each_entry_safe(d_buf
, dnext
,
17346 &dmabuf
->dbuf
.list
, list
) {
17347 list_del_init(&d_buf
->list
);
17348 lpfc_in_buf_free(vport
->phba
, d_buf
);
17350 lpfc_in_buf_free(vport
->phba
, &dmabuf
->dbuf
);
17353 lpfc_update_rcv_time_stamp(vport
);
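/*
 * Timing note (illustrative): a received sequence is considered timed out
 * when
 *
 *	jiffies >= dmabuf->time_stamp + msecs_to_jiffies(phba->fc_edtov)
 *
 * which is exactly the time_before() test used above; E_D_TOV is kept in
 * milliseconds in phba->fc_edtov.
 */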
17357 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
17358 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
17360 * This function searches through the existing incomplete sequences that have
17361 * been sent to this @vport. If the frame matches one of the incomplete
17362 * sequences then the dbuf in the @dmabuf is added to the list of frames that
17363 * make up that sequence. If no sequence is found that matches this frame then
17364 * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list
17365 * This function returns a pointer to the first dmabuf in the sequence list that
17366 * the frame was linked to.
17368 static struct hbq_dmabuf
*
17369 lpfc_fc_frame_add(struct lpfc_vport
*vport
, struct hbq_dmabuf
*dmabuf
)
17371 struct fc_frame_header
*new_hdr
;
17372 struct fc_frame_header
*temp_hdr
;
17373 struct lpfc_dmabuf
*d_buf
;
17374 struct lpfc_dmabuf
*h_buf
;
17375 struct hbq_dmabuf
*seq_dmabuf
= NULL
;
17376 struct hbq_dmabuf
*temp_dmabuf
= NULL
;
17379 INIT_LIST_HEAD(&dmabuf
->dbuf
.list
);
17380 dmabuf
->time_stamp
= jiffies
;
17381 new_hdr
= (struct fc_frame_header
*)dmabuf
->hbuf
.virt
;
17383 /* Use the hdr_buf to find the sequence that this frame belongs to */
17384 list_for_each_entry(h_buf
, &vport
->rcv_buffer_list
, list
) {
17385 temp_hdr
= (struct fc_frame_header
*)h_buf
->virt
;
17386 if ((temp_hdr
->fh_seq_id
!= new_hdr
->fh_seq_id
) ||
17387 (temp_hdr
->fh_ox_id
!= new_hdr
->fh_ox_id
) ||
17388 (memcmp(&temp_hdr
->fh_s_id
, &new_hdr
->fh_s_id
, 3)))
17390 /* found a pending sequence that matches this frame */
17391 seq_dmabuf
= container_of(h_buf
, struct hbq_dmabuf
, hbuf
);
17396 * This indicates first frame received for this sequence.
17397 * Queue the buffer on the vport's rcv_buffer_list.
17399 list_add_tail(&dmabuf
->hbuf
.list
, &vport
->rcv_buffer_list
);
17400 lpfc_update_rcv_time_stamp(vport
);
17403 temp_hdr
= seq_dmabuf
->hbuf
.virt
;
17404 if (be16_to_cpu(new_hdr
->fh_seq_cnt
) <
17405 be16_to_cpu(temp_hdr
->fh_seq_cnt
)) {
17406 list_del_init(&seq_dmabuf
->hbuf
.list
);
17407 list_add_tail(&dmabuf
->hbuf
.list
, &vport
->rcv_buffer_list
);
17408 list_add_tail(&dmabuf
->dbuf
.list
, &seq_dmabuf
->dbuf
.list
);
17409 lpfc_update_rcv_time_stamp(vport
);
17412 /* move this sequence to the tail to indicate a young sequence */
17413 list_move_tail(&seq_dmabuf
->hbuf
.list
, &vport
->rcv_buffer_list
);
17414 seq_dmabuf
->time_stamp
= jiffies
;
17415 lpfc_update_rcv_time_stamp(vport
);
17416 if (list_empty(&seq_dmabuf
->dbuf
.list
)) {
17417 temp_hdr
= dmabuf
->hbuf
.virt
;
17418 list_add_tail(&dmabuf
->dbuf
.list
, &seq_dmabuf
->dbuf
.list
);
17421 /* find the correct place in the sequence to insert this frame */
17422 d_buf
= list_entry(seq_dmabuf
->dbuf
.list
.prev
, typeof(*d_buf
), list
);
17424 temp_dmabuf
= container_of(d_buf
, struct hbq_dmabuf
, dbuf
);
17425 temp_hdr
= (struct fc_frame_header
*)temp_dmabuf
->hbuf
.virt
;
17427 * If the frame's sequence count is greater than the frame on
17428 * the list then insert the frame right after this frame
17430 if (be16_to_cpu(new_hdr
->fh_seq_cnt
) >
17431 be16_to_cpu(temp_hdr
->fh_seq_cnt
)) {
17432 list_add(&dmabuf
->dbuf
.list
, &temp_dmabuf
->dbuf
.list
);
17437 if (&d_buf
->list
== &seq_dmabuf
->dbuf
.list
)
17439 d_buf
= list_entry(d_buf
->list
.prev
, typeof(*d_buf
), list
);
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information in the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it frees all
 * the frames of the partially assembled sequence.
 *
 * Return
 * true  -- if there is a matching partially assembled sequence present and
 *          all its frames were freed;
 * false -- if there is no matching partially assembled sequence present, so
 *          nothing was aborted in the lower layer driver
 **/
17464 lpfc_sli4_abort_partial_seq(struct lpfc_vport
*vport
,
17465 struct hbq_dmabuf
*dmabuf
)
17467 struct fc_frame_header
*new_hdr
;
17468 struct fc_frame_header
*temp_hdr
;
17469 struct lpfc_dmabuf
*d_buf
, *n_buf
, *h_buf
;
17470 struct hbq_dmabuf
*seq_dmabuf
= NULL
;
17472 /* Use the hdr_buf to find the sequence that matches this frame */
17473 INIT_LIST_HEAD(&dmabuf
->dbuf
.list
);
17474 INIT_LIST_HEAD(&dmabuf
->hbuf
.list
);
17475 new_hdr
= (struct fc_frame_header
*)dmabuf
->hbuf
.virt
;
17476 list_for_each_entry(h_buf
, &vport
->rcv_buffer_list
, list
) {
17477 temp_hdr
= (struct fc_frame_header
*)h_buf
->virt
;
17478 if ((temp_hdr
->fh_seq_id
!= new_hdr
->fh_seq_id
) ||
17479 (temp_hdr
->fh_ox_id
!= new_hdr
->fh_ox_id
) ||
17480 (memcmp(&temp_hdr
->fh_s_id
, &new_hdr
->fh_s_id
, 3)))
17482 /* found a pending sequence that matches this frame */
17483 seq_dmabuf
= container_of(h_buf
, struct hbq_dmabuf
, hbuf
);
17487 /* Free up all the frames from the partially assembled sequence */
17489 list_for_each_entry_safe(d_buf
, n_buf
,
17490 &seq_dmabuf
->dbuf
.list
, list
) {
17491 list_del_init(&d_buf
->list
);
17492 lpfc_in_buf_free(vport
->phba
, d_buf
);
/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort a sequence that has already been assembled
 * and handed to the upper level protocol, described by the information in
 * the basic abort @dmabuf. It checks whether such a pending context exists
 * at the upper level protocol. If so, it cleans up the pending context.
 *
 * Return
 * true  -- if there is a matching pending context of the sequence and it
 *          was cleaned up;
 * false -- if there is no matching pending context of the sequence present
 **/
17516 lpfc_sli4_abort_ulp_seq(struct lpfc_vport
*vport
, struct hbq_dmabuf
*dmabuf
)
17518 struct lpfc_hba
*phba
= vport
->phba
;
17521 /* Accepting abort at ulp with SLI4 only */
17522 if (phba
->sli_rev
< LPFC_SLI_REV4
)
17525 /* Register all caring upper level protocols to attend abort */
17526 handled
= lpfc_ct_handle_unsol_abort(phba
, dmabuf
);
17534 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
17535 * @phba: Pointer to HBA context object.
17536 * @cmd_iocbq: pointer to the command iocbq structure.
17537 * @rsp_iocbq: pointer to the response iocbq structure.
17539 * This function handles the sequence abort response iocb command complete
17540 * event. It properly releases the memory allocated to the sequence abort
17544 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba
*phba
,
17545 struct lpfc_iocbq
*cmd_iocbq
,
17546 struct lpfc_iocbq
*rsp_iocbq
)
17548 struct lpfc_nodelist
*ndlp
;
17551 ndlp
= (struct lpfc_nodelist
*)cmd_iocbq
->context1
;
17552 lpfc_nlp_put(ndlp
);
17553 lpfc_nlp_not_used(ndlp
);
17554 lpfc_sli_release_iocbq(phba
, cmd_iocbq
);
17557 /* Failure means BLS ABORT RSP did not get delivered to remote node*/
17558 if (rsp_iocbq
&& rsp_iocbq
->iocb
.ulpStatus
)
17559 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
17560 "3154 BLS ABORT RSP failed, data: x%x/x%x\n",
17561 rsp_iocbq
->iocb
.ulpStatus
,
17562 rsp_iocbq
->iocb
.un
.ulpWord
[4]);
/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
 **/
17574 lpfc_sli4_xri_inrange(struct lpfc_hba
*phba
,
17579 for (i
= 0; i
< phba
->sli4_hba
.max_cfg_param
.max_xri
; i
++) {
17580 if (xri
== phba
->sli4_hba
.xri_ids
[i
])
17587 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
17588 * @phba: Pointer to HBA context object.
17589 * @fc_hdr: pointer to a FC frame header.
17591 * This function sends a basic response to a previous unsol sequence abort
17592 * event after aborting the sequence handling.
17595 lpfc_sli4_seq_abort_rsp(struct lpfc_vport
*vport
,
17596 struct fc_frame_header
*fc_hdr
, bool aborted
)
17598 struct lpfc_hba
*phba
= vport
->phba
;
17599 struct lpfc_iocbq
*ctiocb
= NULL
;
17600 struct lpfc_nodelist
*ndlp
;
17601 uint16_t oxid
, rxid
, xri
, lxri
;
17602 uint32_t sid
, fctl
;
17606 if (!lpfc_is_link_up(phba
))
17609 sid
= sli4_sid_from_fc_hdr(fc_hdr
);
17610 oxid
= be16_to_cpu(fc_hdr
->fh_ox_id
);
17611 rxid
= be16_to_cpu(fc_hdr
->fh_rx_id
);
17613 ndlp
= lpfc_findnode_did(vport
, sid
);
17615 ndlp
= lpfc_nlp_init(vport
, sid
);
17617 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_ELS
,
17618 "1268 Failed to allocate ndlp for "
17619 "oxid:x%x SID:x%x\n", oxid
, sid
);
17622 /* Put ndlp onto pport node list */
17623 lpfc_enqueue_node(vport
, ndlp
);
17624 } else if (!NLP_CHK_NODE_ACT(ndlp
)) {
17625 /* re-setup ndlp without removing from node list */
17626 ndlp
= lpfc_enable_node(vport
, ndlp
, NLP_STE_UNUSED_NODE
);
17628 lpfc_printf_vlog(vport
, KERN_WARNING
, LOG_ELS
,
17629 "3275 Failed to active ndlp found "
17630 "for oxid:x%x SID:x%x\n", oxid
, sid
);
17635 /* Allocate buffer for rsp iocb */
17636 ctiocb
= lpfc_sli_get_iocbq(phba
);
17640 /* Extract the F_CTL field from FC_HDR */
17641 fctl
= sli4_fctl_from_fc_hdr(fc_hdr
);
17643 icmd
= &ctiocb
->iocb
;
17644 icmd
->un
.xseq64
.bdl
.bdeSize
= 0;
17645 icmd
->un
.xseq64
.bdl
.ulpIoTag32
= 0;
17646 icmd
->un
.xseq64
.w5
.hcsw
.Dfctl
= 0;
17647 icmd
->un
.xseq64
.w5
.hcsw
.Rctl
= FC_RCTL_BA_ACC
;
17648 icmd
->un
.xseq64
.w5
.hcsw
.Type
= FC_TYPE_BLS
;
17650 /* Fill in the rest of iocb fields */
17651 icmd
->ulpCommand
= CMD_XMIT_BLS_RSP64_CX
;
17652 icmd
->ulpBdeCount
= 0;
17654 icmd
->ulpClass
= CLASS3
;
17655 icmd
->ulpContext
= phba
->sli4_hba
.rpi_ids
[ndlp
->nlp_rpi
];
17656 ctiocb
->context1
= lpfc_nlp_get(ndlp
);
17658 ctiocb
->vport
= phba
->pport
;
17659 ctiocb
->iocb_cmpl
= lpfc_sli4_seq_abort_rsp_cmpl
;
17660 ctiocb
->sli4_lxritag
= NO_XRI
;
17661 ctiocb
->sli4_xritag
= NO_XRI
;
17663 if (fctl
& FC_FC_EX_CTX
)
17664 /* Exchange responder sent the abort so we
17670 lxri
= lpfc_sli4_xri_inrange(phba
, xri
);
17671 if (lxri
!= NO_XRI
)
17672 lpfc_set_rrq_active(phba
, ndlp
, lxri
,
17673 (xri
== oxid
) ? rxid
: oxid
, 0);
17674 /* For BA_ABTS from exchange responder, if the logical xri with
17675 * the oxid maps to the FCP XRI range, the port no longer has
17676 * that exchange context, send a BLS_RJT. Override the IOCB for
17679 if ((fctl
& FC_FC_EX_CTX
) &&
17680 (lxri
> lpfc_sli4_get_iocb_cnt(phba
))) {
17681 icmd
->un
.xseq64
.w5
.hcsw
.Rctl
= FC_RCTL_BA_RJT
;
17682 bf_set(lpfc_vndr_code
, &icmd
->un
.bls_rsp
, 0);
17683 bf_set(lpfc_rsn_expln
, &icmd
->un
.bls_rsp
, FC_BA_RJT_INV_XID
);
17684 bf_set(lpfc_rsn_code
, &icmd
->un
.bls_rsp
, FC_BA_RJT_UNABLE
);
17687 /* If BA_ABTS failed to abort a partially assembled receive sequence,
17688 * the driver no longer has that exchange, send a BLS_RJT. Override
17689 * the IOCB for a BA_RJT.
17691 if (aborted
== false) {
17692 icmd
->un
.xseq64
.w5
.hcsw
.Rctl
= FC_RCTL_BA_RJT
;
17693 bf_set(lpfc_vndr_code
, &icmd
->un
.bls_rsp
, 0);
17694 bf_set(lpfc_rsn_expln
, &icmd
->un
.bls_rsp
, FC_BA_RJT_INV_XID
);
17695 bf_set(lpfc_rsn_code
, &icmd
->un
.bls_rsp
, FC_BA_RJT_UNABLE
);
17698 if (fctl
& FC_FC_EX_CTX
) {
17699 /* ABTS sent by responder to CT exchange, construction
17700 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
17701 * field and RX_ID from ABTS for RX_ID field.
17703 bf_set(lpfc_abts_orig
, &icmd
->un
.bls_rsp
, LPFC_ABTS_UNSOL_RSP
);
17705 /* ABTS sent by initiator to CT exchange, construction
17706 * of BA_ACC will need to allocate a new XRI as for the
17709 bf_set(lpfc_abts_orig
, &icmd
->un
.bls_rsp
, LPFC_ABTS_UNSOL_INT
);
17711 bf_set(lpfc_abts_rxid
, &icmd
->un
.bls_rsp
, rxid
);
17712 bf_set(lpfc_abts_oxid
, &icmd
->un
.bls_rsp
, oxid
);
17714 /* Xmit CT abts response on exchange <xid> */
17715 lpfc_printf_vlog(vport
, KERN_INFO
, LOG_ELS
,
17716 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
17717 icmd
->un
.xseq64
.w5
.hcsw
.Rctl
, oxid
, phba
->link_state
);
17719 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, ctiocb
, 0);
17720 if (rc
== IOCB_ERROR
) {
17721 lpfc_printf_vlog(vport
, KERN_ERR
, LOG_ELS
,
17722 "2925 Failed to issue CT ABTS RSP x%x on "
17723 "xri x%x, Data x%x\n",
17724 icmd
->un
.xseq64
.w5
.hcsw
.Rctl
, oxid
,
17726 lpfc_nlp_put(ndlp
);
17727 ctiocb
->context1
= NULL
;
17728 lpfc_sli_release_iocbq(phba
, ctiocb
);
/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it marks the per-oxid status to indicate
 * that the unsolicited sequence has been aborted. After that, it will issue
 * a basic accept to accept the abort.
 **/
17746 lpfc_sli4_handle_unsol_abort(struct lpfc_vport
*vport
,
17747 struct hbq_dmabuf
*dmabuf
)
17749 struct lpfc_hba
*phba
= vport
->phba
;
17750 struct fc_frame_header fc_hdr
;
17754 /* Make a copy of fc_hdr before the dmabuf being released */
17755 memcpy(&fc_hdr
, dmabuf
->hbuf
.virt
, sizeof(struct fc_frame_header
));
17756 fctl
= sli4_fctl_from_fc_hdr(&fc_hdr
);
17758 if (fctl
& FC_FC_EX_CTX
) {
17759 /* ABTS by responder to exchange, no cleanup needed */
17762 /* ABTS by initiator to exchange, need to do cleanup */
17763 aborted
= lpfc_sli4_abort_partial_seq(vport
, dmabuf
);
17764 if (aborted
== false)
17765 aborted
= lpfc_sli4_abort_ulp_seq(vport
, dmabuf
);
17767 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
17769 if (phba
->nvmet_support
) {
17770 lpfc_nvmet_rcv_unsol_abort(vport
, &fc_hdr
);
17774 /* Respond with BA_ACC or BA_RJT accordingly */
17775 lpfc_sli4_seq_abort_rsp(vport
, &fc_hdr
, aborted
);
/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three things: 1) that the first frame has
 * a sequence count of zero; 2) that there is a frame with the last-frame-of-
 * sequence bit set; and 3) that there are no holes in the sequence count. The
 * function returns 1 when the sequence is complete, otherwise it returns 0.
 **/
17791 lpfc_seq_complete(struct hbq_dmabuf
*dmabuf
)
17793 struct fc_frame_header
*hdr
;
17794 struct lpfc_dmabuf
*d_buf
;
17795 struct hbq_dmabuf
*seq_dmabuf
;
17799 hdr
= (struct fc_frame_header
*)dmabuf
->hbuf
.virt
;
	/* make sure the first frame of the sequence has a sequence count of zero */
17801 if (hdr
->fh_seq_cnt
!= seq_count
)
17803 fctl
= (hdr
->fh_f_ctl
[0] << 16 |
17804 hdr
->fh_f_ctl
[1] << 8 |
17806 /* If last frame of sequence we can return success. */
17807 if (fctl
& FC_FC_END_SEQ
)
17809 list_for_each_entry(d_buf
, &dmabuf
->dbuf
.list
, list
) {
17810 seq_dmabuf
= container_of(d_buf
, struct hbq_dmabuf
, dbuf
);
17811 hdr
= (struct fc_frame_header
*)seq_dmabuf
->hbuf
.virt
;
17812 /* If there is a hole in the sequence count then fail. */
17813 if (++seq_count
!= be16_to_cpu(hdr
->fh_seq_cnt
))
17815 fctl
= (hdr
->fh_f_ctl
[0] << 16 |
17816 hdr
->fh_f_ctl
[1] << 8 |
17818 /* If last frame of sequence we can return success. */
17819 if (fctl
& FC_FC_END_SEQ
)
/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that could not
 * be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
17838 static struct lpfc_iocbq
*
17839 lpfc_prep_seq(struct lpfc_vport
*vport
, struct hbq_dmabuf
*seq_dmabuf
)
17841 struct hbq_dmabuf
*hbq_buf
;
17842 struct lpfc_dmabuf
*d_buf
, *n_buf
;
17843 struct lpfc_iocbq
*first_iocbq
, *iocbq
;
17844 struct fc_frame_header
*fc_hdr
;
17846 uint32_t len
, tot_len
;
17847 struct ulp_bde64
*pbde
;
17849 fc_hdr
= (struct fc_frame_header
*)seq_dmabuf
->hbuf
.virt
;
17850 /* remove from receive buffer list */
17851 list_del_init(&seq_dmabuf
->hbuf
.list
);
17852 lpfc_update_rcv_time_stamp(vport
);
17853 /* get the Remote Port's SID */
17854 sid
= sli4_sid_from_fc_hdr(fc_hdr
);
17856 /* Get an iocbq struct to fill in. */
17857 first_iocbq
= lpfc_sli_get_iocbq(vport
->phba
);
17859 /* Initialize the first IOCB. */
17860 first_iocbq
->iocb
.unsli3
.rcvsli3
.acc_len
= 0;
17861 first_iocbq
->iocb
.ulpStatus
= IOSTAT_SUCCESS
;
17862 first_iocbq
->vport
= vport
;
17864 /* Check FC Header to see what TYPE of frame we are rcv'ing */
17865 if (sli4_type_from_fc_hdr(fc_hdr
) == FC_TYPE_ELS
) {
17866 first_iocbq
->iocb
.ulpCommand
= CMD_IOCB_RCV_ELS64_CX
;
17867 first_iocbq
->iocb
.un
.rcvels
.parmRo
=
17868 sli4_did_from_fc_hdr(fc_hdr
);
17869 first_iocbq
->iocb
.ulpPU
= PARM_NPIV_DID
;
17871 first_iocbq
->iocb
.ulpCommand
= CMD_IOCB_RCV_SEQ64_CX
;
17872 first_iocbq
->iocb
.ulpContext
= NO_XRI
;
17873 first_iocbq
->iocb
.unsli3
.rcvsli3
.ox_id
=
17874 be16_to_cpu(fc_hdr
->fh_ox_id
);
17875 /* iocbq is prepped for internal consumption. Physical vpi. */
17876 first_iocbq
->iocb
.unsli3
.rcvsli3
.vpi
=
17877 vport
->phba
->vpi_ids
[vport
->vpi
];
17878 /* put the first buffer into the first IOCBq */
17879 tot_len
= bf_get(lpfc_rcqe_length
,
17880 &seq_dmabuf
->cq_event
.cqe
.rcqe_cmpl
);
17882 first_iocbq
->context2
= &seq_dmabuf
->dbuf
;
17883 first_iocbq
->context3
= NULL
;
17884 first_iocbq
->iocb
.ulpBdeCount
= 1;
17885 if (tot_len
> LPFC_DATA_BUF_SIZE
)
17886 first_iocbq
->iocb
.un
.cont64
[0].tus
.f
.bdeSize
=
17887 LPFC_DATA_BUF_SIZE
;
17889 first_iocbq
->iocb
.un
.cont64
[0].tus
.f
.bdeSize
= tot_len
;
17891 first_iocbq
->iocb
.un
.rcvels
.remoteID
= sid
;
17893 first_iocbq
->iocb
.unsli3
.rcvsli3
.acc_len
= tot_len
;
17895 iocbq
= first_iocbq
;
17897 * Each IOCBq can have two Buffers assigned, so go through the list
17898 * of buffers for this sequence and save two buffers in each IOCBq
17900 list_for_each_entry_safe(d_buf
, n_buf
, &seq_dmabuf
->dbuf
.list
, list
) {
17902 lpfc_in_buf_free(vport
->phba
, d_buf
);
17905 if (!iocbq
->context3
) {
17906 iocbq
->context3
= d_buf
;
17907 iocbq
->iocb
.ulpBdeCount
++;
17908 /* We need to get the size out of the right CQE */
17909 hbq_buf
= container_of(d_buf
, struct hbq_dmabuf
, dbuf
);
17910 len
= bf_get(lpfc_rcqe_length
,
17911 &hbq_buf
->cq_event
.cqe
.rcqe_cmpl
);
17912 pbde
= (struct ulp_bde64
*)
17913 &iocbq
->iocb
.unsli3
.sli3Words
[4];
17914 if (len
> LPFC_DATA_BUF_SIZE
)
17915 pbde
->tus
.f
.bdeSize
= LPFC_DATA_BUF_SIZE
;
17917 pbde
->tus
.f
.bdeSize
= len
;
17919 iocbq
->iocb
.unsli3
.rcvsli3
.acc_len
+= len
;
17922 iocbq
= lpfc_sli_get_iocbq(vport
->phba
);
17925 first_iocbq
->iocb
.ulpStatus
=
17926 IOSTAT_FCP_RSP_ERROR
;
17927 first_iocbq
->iocb
.un
.ulpWord
[4] =
17928 IOERR_NO_RESOURCES
;
17930 lpfc_in_buf_free(vport
->phba
, d_buf
);
17933 /* We need to get the size out of the right CQE */
17934 hbq_buf
= container_of(d_buf
, struct hbq_dmabuf
, dbuf
);
17935 len
= bf_get(lpfc_rcqe_length
,
17936 &hbq_buf
->cq_event
.cqe
.rcqe_cmpl
);
17937 iocbq
->context2
= d_buf
;
17938 iocbq
->context3
= NULL
;
17939 iocbq
->iocb
.ulpBdeCount
= 1;
17940 if (len
> LPFC_DATA_BUF_SIZE
)
17941 iocbq
->iocb
.un
.cont64
[0].tus
.f
.bdeSize
=
17942 LPFC_DATA_BUF_SIZE
;
17944 iocbq
->iocb
.un
.cont64
[0].tus
.f
.bdeSize
= len
;
17947 iocbq
->iocb
.unsli3
.rcvsli3
.acc_len
= tot_len
;
17949 iocbq
->iocb
.un
.rcvels
.remoteID
= sid
;
17950 list_add_tail(&iocbq
->list
, &first_iocbq
->list
);
17953 return first_iocbq
;
17957 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport
*vport
,
17958 struct hbq_dmabuf
*seq_dmabuf
)
17960 struct fc_frame_header
*fc_hdr
;
17961 struct lpfc_iocbq
*iocbq
, *curr_iocb
, *next_iocb
;
17962 struct lpfc_hba
*phba
= vport
->phba
;
17964 fc_hdr
= (struct fc_frame_header
*)seq_dmabuf
->hbuf
.virt
;
17965 iocbq
= lpfc_prep_seq(vport
, seq_dmabuf
);
17967 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
17968 "2707 Ring %d handler: Failed to allocate "
17969 "iocb Rctl x%x Type x%x received\n",
17971 fc_hdr
->fh_r_ctl
, fc_hdr
->fh_type
);
17974 if (!lpfc_complete_unsol_iocb(phba
,
17975 phba
->sli4_hba
.els_wq
->pring
,
17976 iocbq
, fc_hdr
->fh_r_ctl
,
17978 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
17979 "2540 Ring %d handler: unexpected Rctl "
17980 "x%x Type x%x received\n",
17982 fc_hdr
->fh_r_ctl
, fc_hdr
->fh_type
);
17984 /* Free iocb created in lpfc_prep_seq */
17985 list_for_each_entry_safe(curr_iocb
, next_iocb
,
17986 &iocbq
->list
, list
) {
17987 list_del_init(&curr_iocb
->list
);
17988 lpfc_sli_release_iocbq(phba
, curr_iocb
);
17990 lpfc_sli_release_iocbq(phba
, iocbq
);
17994 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba
*phba
, struct lpfc_iocbq
*cmdiocb
,
17995 struct lpfc_iocbq
*rspiocb
)
17997 struct lpfc_dmabuf
*pcmd
= cmdiocb
->context2
;
17999 if (pcmd
&& pcmd
->virt
)
18000 dma_pool_free(phba
->lpfc_drb_pool
, pcmd
->virt
, pcmd
->phys
);
18002 lpfc_sli_release_iocbq(phba
, cmdiocb
);
18003 lpfc_drain_txq(phba
);
18007 lpfc_sli4_handle_mds_loopback(struct lpfc_vport
*vport
,
18008 struct hbq_dmabuf
*dmabuf
)
18010 struct fc_frame_header
*fc_hdr
;
18011 struct lpfc_hba
*phba
= vport
->phba
;
18012 struct lpfc_iocbq
*iocbq
= NULL
;
18013 union lpfc_wqe
*wqe
;
18014 struct lpfc_dmabuf
*pcmd
= NULL
;
18015 uint32_t frame_len
;
18017 unsigned long iflags
;
18019 fc_hdr
= (struct fc_frame_header
*)dmabuf
->hbuf
.virt
;
18020 frame_len
= bf_get(lpfc_rcqe_length
, &dmabuf
->cq_event
.cqe
.rcqe_cmpl
);
18022 /* Send the received frame back */
18023 iocbq
= lpfc_sli_get_iocbq(phba
);
18025 /* Queue cq event and wakeup worker thread to process it */
18026 spin_lock_irqsave(&phba
->hbalock
, iflags
);
18027 list_add_tail(&dmabuf
->cq_event
.list
,
18028 &phba
->sli4_hba
.sp_queue_event
);
18029 phba
->hba_flag
|= HBA_SP_QUEUE_EVT
;
18030 spin_unlock_irqrestore(&phba
->hbalock
, iflags
);
18031 lpfc_worker_wake_up(phba
);
18035 /* Allocate buffer for command payload */
18036 pcmd
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
18038 pcmd
->virt
= dma_pool_alloc(phba
->lpfc_drb_pool
, GFP_KERNEL
,
18040 if (!pcmd
|| !pcmd
->virt
)
18043 INIT_LIST_HEAD(&pcmd
->list
);
18045 /* copyin the payload */
18046 memcpy(pcmd
->virt
, dmabuf
->dbuf
.virt
, frame_len
);
18048 /* fill in BDE's for command */
18049 iocbq
->iocb
.un
.xseq64
.bdl
.addrHigh
= putPaddrHigh(pcmd
->phys
);
18050 iocbq
->iocb
.un
.xseq64
.bdl
.addrLow
= putPaddrLow(pcmd
->phys
);
18051 iocbq
->iocb
.un
.xseq64
.bdl
.bdeFlags
= BUFF_TYPE_BDE_64
;
18052 iocbq
->iocb
.un
.xseq64
.bdl
.bdeSize
= frame_len
;
18054 iocbq
->context2
= pcmd
;
18055 iocbq
->vport
= vport
;
18056 iocbq
->iocb_flag
&= ~LPFC_FIP_ELS_ID_MASK
;
18057 iocbq
->iocb_flag
|= LPFC_USE_FCPWQIDX
;
18060 * Setup rest of the iocb as though it were a WQE
18061 * Build the SEND_FRAME WQE
18063 wqe
= (union lpfc_wqe
*)&iocbq
->iocb
;
18065 wqe
->send_frame
.frame_len
= frame_len
;
18066 wqe
->send_frame
.fc_hdr_wd0
= be32_to_cpu(*((uint32_t *)fc_hdr
));
18067 wqe
->send_frame
.fc_hdr_wd1
= be32_to_cpu(*((uint32_t *)fc_hdr
+ 1));
18068 wqe
->send_frame
.fc_hdr_wd2
= be32_to_cpu(*((uint32_t *)fc_hdr
+ 2));
18069 wqe
->send_frame
.fc_hdr_wd3
= be32_to_cpu(*((uint32_t *)fc_hdr
+ 3));
18070 wqe
->send_frame
.fc_hdr_wd4
= be32_to_cpu(*((uint32_t *)fc_hdr
+ 4));
18071 wqe
->send_frame
.fc_hdr_wd5
= be32_to_cpu(*((uint32_t *)fc_hdr
+ 5));
18073 iocbq
->iocb
.ulpCommand
= CMD_SEND_FRAME
;
18074 iocbq
->iocb
.ulpLe
= 1;
18075 iocbq
->iocb_cmpl
= lpfc_sli4_mds_loopback_cmpl
;
18076 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, iocbq
, 0);
18077 if (rc
== IOCB_ERROR
)
18080 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18084 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
18085 "2023 Unable to process MDS loopback frame\n");
18086 if (pcmd
&& pcmd
->virt
)
18087 dma_pool_free(phba
->lpfc_drb_pool
, pcmd
->virt
, pcmd
->phys
);
18090 lpfc_sli_release_iocbq(phba
, iocbq
);
18091 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18095 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
18096 * @phba: Pointer to HBA context object.
18098 * This function is called with no lock held. This function processes all
18099 * the received buffers and gives it to upper layers when a received buffer
18100 * indicates that it is the final frame in the sequence. The interrupt
18101 * service routine processes received buffers at interrupt contexts.
18102 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
18103 * appropriate receive function when the final frame in a sequence is received.
18106 lpfc_sli4_handle_received_buffer(struct lpfc_hba
*phba
,
18107 struct hbq_dmabuf
*dmabuf
)
18109 struct hbq_dmabuf
*seq_dmabuf
;
18110 struct fc_frame_header
*fc_hdr
;
18111 struct lpfc_vport
*vport
;
18115 /* Process each received buffer */
18116 fc_hdr
= (struct fc_frame_header
*)dmabuf
->hbuf
.virt
;
18118 if (fc_hdr
->fh_r_ctl
== FC_RCTL_MDS_DIAGS
||
18119 fc_hdr
->fh_r_ctl
== FC_RCTL_DD_UNSOL_DATA
) {
18120 vport
= phba
->pport
;
18121 /* Handle MDS Loopback frames */
18122 lpfc_sli4_handle_mds_loopback(vport
, dmabuf
);
18126 /* check to see if this a valid type of frame */
18127 if (lpfc_fc_frame_check(phba
, fc_hdr
)) {
18128 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18132 if ((bf_get(lpfc_cqe_code
,
18133 &dmabuf
->cq_event
.cqe
.rcqe_cmpl
) == CQE_CODE_RECEIVE_V1
))
18134 fcfi
= bf_get(lpfc_rcqe_fcf_id_v1
,
18135 &dmabuf
->cq_event
.cqe
.rcqe_cmpl
);
18137 fcfi
= bf_get(lpfc_rcqe_fcf_id
,
18138 &dmabuf
->cq_event
.cqe
.rcqe_cmpl
);
18140 if (fc_hdr
->fh_r_ctl
== 0xF4 && fc_hdr
->fh_type
== 0xFF) {
18141 vport
= phba
->pport
;
18142 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
18143 "2023 MDS Loopback %d bytes\n",
18144 bf_get(lpfc_rcqe_length
,
18145 &dmabuf
->cq_event
.cqe
.rcqe_cmpl
));
18146 /* Handle MDS Loopback frames */
18147 lpfc_sli4_handle_mds_loopback(vport
, dmabuf
);
18151 /* d_id this frame is directed to */
18152 did
= sli4_did_from_fc_hdr(fc_hdr
);
18154 vport
= lpfc_fc_frame_to_vport(phba
, fc_hdr
, fcfi
, did
);
18156 /* throw out the frame */
18157 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18161 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18162 if (!(vport
->vpi_state
& LPFC_VPI_REGISTERED
) &&
18163 (did
!= Fabric_DID
)) {
18165 * Throw out the frame if we are not pt2pt.
18166 * The pt2pt protocol allows for discovery frames
18167 * to be received without a registered VPI.
18169 if (!(vport
->fc_flag
& FC_PT2PT
) ||
18170 (phba
->link_state
== LPFC_HBA_READY
)) {
18171 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18176 /* Handle the basic abort sequence (BA_ABTS) event */
18177 if (fc_hdr
->fh_r_ctl
== FC_RCTL_BA_ABTS
) {
18178 lpfc_sli4_handle_unsol_abort(vport
, dmabuf
);
18182 /* Link this frame */
18183 seq_dmabuf
= lpfc_fc_frame_add(vport
, dmabuf
);
18185 /* unable to add frame to vport - throw it out */
18186 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18189 /* If not last frame in sequence continue processing frames. */
18190 if (!lpfc_seq_complete(seq_dmabuf
))
18193 /* Send the complete sequence to the upper layer protocol */
18194 lpfc_sli4_send_seq_to_ulp(vport
, seq_dmabuf
);
/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when the driver is
 * active.
 *
 * Return codes
 *	0 - successful
 *	-EIO - The mailbox failed to complete successfully.
 *	       When this error occurs, the driver is not guaranteed
 *	       to have any rpi regions posted to the device and
 *	       must either attempt to repost the regions or take a
 *	       fatal error.
 **/
18219 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba
*phba
)
18221 struct lpfc_rpi_hdr
*rpi_page
;
18225 /* SLI4 ports that support extents do not require RPI headers. */
18226 if (!phba
->sli4_hba
.rpi_hdrs_in_use
)
18228 if (phba
->sli4_hba
.extents_in_use
)
18231 list_for_each_entry(rpi_page
, &phba
->sli4_hba
.lpfc_rpi_hdr_list
, list
) {
18233 * Assign the rpi headers a physical rpi only if the driver
18234 * has not initialized those resources. A port reset only
18235 * needs the headers posted.
18237 if (bf_get(lpfc_rpi_rsrc_rdy
, &phba
->sli4_hba
.sli4_flags
) !=
18239 rpi_page
->start_rpi
= phba
->sli4_hba
.rpi_ids
[lrpi
];
18241 rc
= lpfc_sli4_post_rpi_hdr(phba
, rpi_page
);
18242 if (rc
!= MBX_SUCCESS
) {
18243 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
18244 "2008 Error %d posting all rpi "
18252 bf_set(lpfc_rpi_rsrc_rdy
, &phba
->sli4_hba
.sli4_flags
,
18253 LPFC_RPI_RSRC_RDY
);
18258 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18259 * @phba: pointer to lpfc hba data structure.
18260 * @rpi_page: pointer to the rpi memory region.
18262 * This routine is invoked to post a single rpi header to the
18263 * HBA consistent with the SLI-4 interface spec. This memory region
18264 * maps up to 64 rpi context regions.
18268 * -ENOMEM - No available memory
18269 * -EIO - The mailbox failed to complete successfully.
18272 lpfc_sli4_post_rpi_hdr(struct lpfc_hba
*phba
, struct lpfc_rpi_hdr
*rpi_page
)
18274 LPFC_MBOXQ_t
*mboxq
;
18275 struct lpfc_mbx_post_hdr_tmpl
*hdr_tmpl
;
18277 uint32_t shdr_status
, shdr_add_status
;
18278 union lpfc_sli4_cfg_shdr
*shdr
;
18280 /* SLI4 ports that support extents do not require RPI headers. */
18281 if (!phba
->sli4_hba
.rpi_hdrs_in_use
)
18283 if (phba
->sli4_hba
.extents_in_use
)
18286 /* The port is notified of the header region via a mailbox command. */
18287 mboxq
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
18289 lpfc_printf_log(phba
, KERN_ERR
, LOG_SLI
,
18290 "2001 Unable to allocate memory for issuing "
18291 "SLI_CONFIG_SPECIAL mailbox command\n");
18295 /* Post all rpi memory regions to the port. */
18296 hdr_tmpl
= &mboxq
->u
.mqe
.un
.hdr_tmpl
;
18297 lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_FCOE
,
18298 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE
,
18299 sizeof(struct lpfc_mbx_post_hdr_tmpl
) -
18300 sizeof(struct lpfc_sli4_cfg_mhdr
),
18301 LPFC_SLI4_MBX_EMBED
);
18304 /* Post the physical rpi to the port for this rpi header. */
18305 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset
, hdr_tmpl
,
18306 rpi_page
->start_rpi
);
18307 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt
,
18308 hdr_tmpl
, rpi_page
->page_count
);
18310 hdr_tmpl
->rpi_paddr_lo
= putPaddrLow(rpi_page
->dmabuf
->phys
);
18311 hdr_tmpl
->rpi_paddr_hi
= putPaddrHigh(rpi_page
->dmabuf
->phys
);
18312 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
18313 shdr
= (union lpfc_sli4_cfg_shdr
*) &hdr_tmpl
->header
.cfg_shdr
;
18314 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
18315 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
18316 if (rc
!= MBX_TIMEOUT
)
18317 mempool_free(mboxq
, phba
->mbox_mem_pool
);
18318 if (shdr_status
|| shdr_add_status
|| rc
) {
18319 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
18320 "2514 POST_RPI_HDR mailbox failed with "
18321 "status x%x add_status x%x, mbx status x%x\n",
18322 shdr_status
, shdr_add_status
, rc
);
18326 * The next_rpi stores the next logical module-64 rpi value used
18327 * to post physical rpis in subsequent rpi postings.
18329 spin_lock_irq(&phba
->hbalock
);
18330 phba
->sli4_hba
.next_rpi
= rpi_page
->next_rpi
;
18331 spin_unlock_irq(&phba
->hbalock
);
/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec.  This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
 *
 * Returns
 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO,
			LOG_NODE | LOG_DISCOVERY,
			"0001 Allocated rpi:x%x max:x%x lim:x%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now.  Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}

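/*
 * Illustration only, not part of the driver: the allocator above is normally
 * paired with lpfc_sli4_free_rpi() below.  A minimal sketch of the pairing a
 * discovery-path caller might use (error handling and the REG_LOGIN mailbox
 * construction are omitted and purely hypothetical):
 *
 *	int rpi = lpfc_sli4_alloc_rpi(phba);
 *
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOMEM;
 *	... build and issue a REG_LOGIN mailbox using rpi ...
 *	lpfc_sli4_free_rpi(phba, rpi);      release once the login is undone
 */
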
/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	/*
	 * if the rpi value indicates a prior unreg has already
	 * been done, skip the unreg.
	 */
	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return;

	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	} else {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_NODE | LOG_DISCOVERY,
				"2016 rpi %x not inuse\n",
				rpi);
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}

/**
 * lpfc_sli4_resume_rpi - Resume an rpi for a node
 * @ndlp: pointer to lpfc nodelist data structure.
 * @cmpl: completion call-back.
 * @arg: data to load as mailbox context.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command to
 * resume the rpi associated with @ndlp.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Post all rpi memory regions to the port. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->ctx_buf = arg;
		mboxq->ctx_ndlp = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *    0 success
 *    -Evalue otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}

/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record.  This routine takes
 * care of the nonembedded mailbox operations.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
	    (shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2558 ADD_FCF_RECORD mailbox failed with "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record.  This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory.  This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0.  This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}

/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record.  The
 * values used are hardcoded.  This routine handles memory initialization.
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}

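/*
 * Illustration only, not part of the driver: the VLAN bit map above packs one
 * bit per VLAN id, eight ids per byte.  For a hypothetical vlan_id of 20:
 *
 *	vlan_bitmap[20 / 8] = 1 << (20 % 8);     byte index 2 gets bit 4 (0x10)
 *
 * so exactly one byte of the bitmap is non-zero for a single valid VLAN.
 */
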
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}

/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_check_next_fcf_pri_level
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level. Starting from the highest priority
 * to the lowest. The most likely FCF candidate will be in the highest
 * priority group. When this routine is called it searches the fcf_pri list for
 * next lowest priority group and repopulates the rr_bmask with only those
 * fcf_indexes.
 * returns:
 * 1=success 0=failure
 **/
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
			LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
			"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
	       sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * the 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * if next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * the 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}

/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals to the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");

		return LPFC_FCOE_FCF_NEXT_NONE;
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
	    LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}

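/*
 * Illustration only, not part of the driver: a FLOGI failover caller walks
 * the eligible bitmask with the routine above until it either gets an index
 * or sees LPFC_FCOE_FCF_NEXT_NONE.  A minimal sketch (error handling omitted,
 * not the driver's actual failover path):
 *
 *	uint16_t fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		return;                      nothing left to try at any priority
 *	lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 */
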
/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table.
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to mailbox object.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				offset += rgn23_data[offset + 1] * 4 + 4;
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
	return;
}

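/*
 * Illustration only, not part of the driver: each region 23 TLV is laid out
 * as <type byte><length byte, in words><payload>, so the parser above always
 * advances by "length * 4 + 4" bytes.  For a hypothetical record whose length
 * byte is 3, the next record starts 3 * 4 + 4 = 16 bytes further on.
 */
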
/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * the mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs that the imbedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful and offset will contain the new offset to use
 * for the next write.
 * Return negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf;
	uint32_t mbox_tmo;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;
	bool check_change_status = false;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_WRITE_OBJECT,
			 sizeof(struct lpfc_mbx_wr_object) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
			check_change_status = true;
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &wr_object->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &wr_object->header.cfg_shdr.response);
	if (check_change_status) {
		shdr_change_status = bf_get(lpfc_wr_object_change_status,
					    &wr_object->u.response);

		if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
		    shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
			shdr_csf = bf_get(lpfc_wr_object_csf,
					  &wr_object->u.response);
			if (shdr_csf)
				shdr_change_status =
					LPFC_CHANGE_STATUS_PCI_RESET;
		}

		switch (shdr_change_status) {
		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3198 Firmware write complete: System "
					"reboot required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_FW_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3199 Firmware write complete: Firmware"
					" reset required to instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3200 Firmware write complete: Port "
					"Migration or PCI Reset required to "
					"instantiate\n");
			break;
		case (LPFC_CHANGE_STATUS_PCI_RESET):
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"3201 Firmware write complete: PCI "
					"Reset required to instantiate\n");
			break;
		default:
			break;
		}
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
		*offset = shdr_add_status;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}

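/*
 * Illustration only, not part of the driver: lpfc_wr_object() posts at most
 * LPFC_MBX_WR_CONFIG_MAX_BDE pages per mailbox and returns the updated
 * *offset, so a firmware download is a loop over the object.  A minimal
 * sketch, assuming a hypothetical "dma_buffers" list already describing the
 * image and "image_size" holding its total length:
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (offset < image_size && !rc)
 *		rc = lpfc_wr_object(phba, &dma_buffers, image_size, &offset);
 */
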
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			mb->ctx_buf = NULL;
			ndlp = (struct lpfc_nodelist *)mb->ctx_ndlp;
			mb->ctx_ndlp = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter.  For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs.  This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	uint32_t txq_cnt = 0;
	struct lpfc_queue *wq;

	if (phba->link_flag & LS_MDS_LOOPBACK) {
		/* MDS WQE are posted only to first WQ*/
		wq = phba->sli4_hba.hdwq[0].io_wq;
		if (unlikely(!wq))
			return 0;
		pring = wq->pring;
	} else {
		wq = phba->sli4_hba.els_wq;
		if (unlikely(!wq))
			return 0;
		pring = lpfc_phba_elsring(phba);
	}

	if (unlikely(!pring) || list_empty(&pring->txq))
		return 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n ",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_els_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}

/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the WQE contains a single
 * BDE then it is converted to a single sli_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 */
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe128 *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i + 1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
				       bpl->tus.f.bdeFlags);
				offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}

/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @qp: Pointer to the hardware queue the WQE is destined for.
 * @pwqe: Pointer to command WQE.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe128 *wqe = &pwqe->wqe;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	uint32_t ret = 0;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring = phba->sli4_hba.nvmels_wq->pring;
		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}

		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->io_wq;
		pring = wq->pring;

		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->io_wq;
		pring = wq->pring;

		ctxp = pwqe->context2;
		sglq = ctxp->ctxbuf->sglq;
		if (pwqe->sli4_xritag == NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}
	return WQE_ERROR;
}

#ifdef LPFC_MXP_STAT
/**
 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
 * 15 seconds after a test case is running.
 *
 * The user should call lpfc_debugfs_multixripools_write before running a test
 * case to clear stat_snapshot_taken. Then the user starts a test case. While
 * the test case is running, stat_snapshot_taken is incremented by 1 every time
 * this routine is called from the heartbeat timer. When stat_snapshot_taken is
 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
 **/
void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	u32 txcmplq_cnt;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;

	if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
		pvt_pool = &qp->p_multixri_pool->pvt_pool;
		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;

		multixri_pool->stat_pbl_count = pbl_pool->count;
		multixri_pool->stat_pvt_count = pvt_pool->count;
		multixri_pool->stat_busy_count = txcmplq_cnt;
	}

	multixri_pool->stat_snapshot_taken++;
}
#endif

/**
 * lpfc_adjust_pvt_pool_count - Adjust private pool count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine moves some XRIs from private to public pool when private pool
 * is not busy.
 **/
void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	u32 io_req_count;
	u32 prev_io_req_count;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	if (!multixri_pool)
		return;
	io_req_count = multixri_pool->io_req_count;
	prev_io_req_count = multixri_pool->prev_io_req_count;

	if (prev_io_req_count != io_req_count) {
		/* Private pool is busy */
		multixri_pool->prev_io_req_count = io_req_count;
	} else {
		/* Private pool is not busy.
		 * Move XRIs from private to public pool.
		 */
		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
	}
}

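/*
 * Illustration only, not part of the driver: the idle test above is a simple
 * generation check.  If two consecutive heartbeat invocations observe the
 * same io_req_count, no request touched the private pool in that interval:
 *
 *	busy = (multixri_pool->prev_io_req_count !=
 *		multixri_pool->io_req_count);
 *
 * Only when the pool is judged idle are its XRIs handed back to the public
 * pool.
 */
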
/**
 * lpfc_adjust_high_watermark - Adjust high watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine sets the high watermark as the number of outstanding XRIs,
 * but makes sure the new value is between xri_limit/2 and xri_limit.
 **/
void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
{
	u32 new_watermark;
	u32 watermark_max;
	u32 watermark_min;
	u32 xri_limit;
	u32 txcmplq_cnt;
	u32 abts_io_bufs;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_sli4_hdw_queue *qp;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;
	xri_limit = multixri_pool->xri_limit;

	watermark_max = xri_limit;
	watermark_min = xri_limit / 2;

	txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
	abts_io_bufs = qp->abts_scsi_io_bufs;
	abts_io_bufs += qp->abts_nvme_io_bufs;

	new_watermark = txcmplq_cnt + abts_io_bufs;
	new_watermark = min(watermark_max, new_watermark);
	new_watermark = max(watermark_min, new_watermark);
	multixri_pool->pvt_pool.high_watermark = new_watermark;

#ifdef LPFC_MXP_STAT
	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
					  new_watermark);
#endif
}

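/*
 * Illustration only, not part of the driver: the clamp above keeps the
 * watermark in [xri_limit/2, xri_limit].  For a hypothetical xri_limit of 120
 * and 10 outstanding XRIs (txcmplq plus aborts), min(120, 10) = 10 and
 * max(60, 10) = 60, so the private pool high watermark becomes 60.
 */
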
/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine is called from the heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
 * The first step moves (all - low_watermark) amount of XRIs.
 * The second step moves the rest of XRIs.
 **/
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct list_head tmp_list;
	u32 tmp_count;

	qp = &phba->sli4_hba.hdwq[hwqid];
	pbl_pool = &qp->p_multixri_pool->pbl_pool;
	pvt_pool = &qp->p_multixri_pool->pvt_pool;
	tmp_count = 0;

	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);

	if (pvt_pool->count > pvt_pool->low_watermark) {
		/* Step 1: move (all - low_watermark) from pvt_pool
		 * to pbl_pool
		 */

		/* Move low watermark of bufs from pvt_pool to tmp_list */
		INIT_LIST_HEAD(&tmp_list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list, &tmp_list);
			tmp_count++;
			if (tmp_count >= pvt_pool->low_watermark)
				break;
		}

		/* Move all bufs from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);

		/* Move all bufs from tmp_list to pvt_pool */
		list_splice(&tmp_list, &pvt_pool->list);

		pbl_pool->count += (pvt_pool->count - tmp_count);
		pvt_pool->count = tmp_count;
	} else {
		/* Step 2: move the rest from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);
		pbl_pool->count += pvt_pool->count;
		pvt_pool->count = 0;
	}

	spin_unlock(&pvt_pool->lock);
	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
}

/**
 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to the hardware queue owning the pools
 * @pbl_pool: specified public free XRI pool
 * @pvt_pool: specified private free XRI pool
 * @count: number of XRIs to move
 *
 * This routine tries to move some free common bufs from the specified pbl_pool
 * to the specified pvt_pool. It might move less than count XRIs if there's not
 * enough in public pool.
 *
 * Return:
 *   true - if XRIs are successfully moved from the specified pbl_pool to the
 *          specified pvt_pool
 *   false - if the specified pbl_pool is empty or locked by someone else
 **/
static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
			  struct lpfc_pbl_pool *pbl_pool,
			  struct lpfc_pvt_pool *pvt_pool, u32 count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	int ret;

	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
	if (ret) {
		if (pbl_pool->count) {
			/* Move a batch of XRIs from public to private pool */
			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
			list_for_each_entry_safe(lpfc_ncmd,
						 lpfc_ncmd_next,
						 &pbl_pool->list,
						 list) {
				list_move_tail(&lpfc_ncmd->list,
					       &pvt_pool->list);
				pvt_pool->count++;
				pbl_pool->count--;
				count--;
				if (count == 0)
					break;
			}

			spin_unlock(&pvt_pool->lock);
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
			return true;
		}
		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
	}

	return false;
}

/**
 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of the public pools
 * with a Round Robin method. The search always starts from the local hwqid,
 * then the next HWQ after the one found last time (rrb_next_hwqid). Once a
 * public pool is found, a batch of free common bufs is moved to the private
 * pool on hwqid. It might move fewer than count XRIs if there's not enough in
 * the public pool.
 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if local pbl_pool is available */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}

	hwq_count = phba->cfg_hdw_queue;

	/* Get the next hwqid which was found last time */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Go to next hwq */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Check if the public free xri pool is available */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit while-loop if success or all hwqid are checked */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Starting point for the next time */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty */
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}
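
/*
 * Round-robin walk above, sketched with hypothetical values: with
 * cfg_hdw_queue == 4 and rrb_next_hwqid == 2, the do/while loop probes the
 * public pools of HWQs 3, 0, 1, 2 in that order and stops at the first pool
 * that yields XRIs; rrb_next_hwqid then records where the next search
 * resumes. The local public pool was already tried (and found empty or
 * busy) before the loop started.
 */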
/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is below the
 * low watermark.
 **/
void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;

	if (pvt_pool->count < pvt_pool->low_watermark)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
}
/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: belong to which HWQ.
 *
 * This routine returns one IO buf back to the free pool. If this is an urgent
 * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
 * lpfc_io_buf_list_put.
 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
{
	unsigned long iflag;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_epd_pool *epd_pool;
	u32 txcmplq_cnt;
	u32 xri_owned;
	u32 xri_limit;
	u32 abts_io_bufs;

	/* MUST zero fields if buffer is reused by another protocol */
	lpfc_ncmd->nvmeCmd = NULL;
	lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
	lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;

	if (phba->cfg_xpsgl && !phba->nvmet_support &&
	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);

	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);

	if (phba->cfg_xri_rebalancing) {
		if (lpfc_ncmd->expedite) {
			/* Return to expedite pool */
			epd_pool = &phba->epd_pool;
			spin_lock_irqsave(&epd_pool->lock, iflag);
			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
			epd_pool->count++;
			spin_unlock_irqrestore(&epd_pool->lock, iflag);
			return;
		}

		/* Avoid invalid access if an IO sneaks in and is being rejected
		 * just _after_ xri pools are destroyed in lpfc_offline.
		 * Nothing much can be done at this point.
		 */
		if (!qp->p_multixri_pool)
			return;

		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		pvt_pool = &qp->p_multixri_pool->pvt_pool;

		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
		abts_io_bufs = qp->abts_scsi_io_bufs;
		abts_io_bufs += qp->abts_nvme_io_bufs;

		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
		xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
		if (xri_owned <= xri_limit)
			qp->p_multixri_pool->below_limit_count++;
		else
			qp->p_multixri_pool->above_limit_count++;
#endif

		/* XRI goes to either public or private free xri pool
		 * based on watermark and xri_limit
		 */
		if ((pvt_pool->count < pvt_pool->low_watermark) ||
		    (xri_owned < xri_limit &&
		     pvt_pool->count < pvt_pool->high_watermark)) {
			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
						  qp, free_pvt_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pvt_pool->list);
			pvt_pool->count++;
			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		} else {
			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
						  qp, free_pub_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pbl_pool->list);
			pbl_pool->count++;
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
		}
	} else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
					  qp, free_xri);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs++;
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
				       iflag);
	}
}
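
/*
 * Worked example of the routing decision above (hypothetical numbers): with
 * pvt_pool->count == 40, low_watermark == 10, high_watermark == 60,
 * txcmplq_cnt == 15, abts bufs == 5 and xri_limit == 100, xri_owned is 60,
 * which is below xri_limit, and the private pool is below its high
 * watermark, so the buffer goes back to the private pool. Once xri_owned
 * reaches xri_limit or the private pool hits its high watermark, buffers
 * spill to the public pool so other hardware queues can claim them.
 */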
/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to HDW queue.
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from the private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}
/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from the expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_epd_pool *epd_pool;

	epd_pool = &phba->epd_pool;
	lpfc_ncmd = NULL;

	spin_lock_irqsave(&epd_pool->lock, iflag);
	if (epd_pool->count > 0) {
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &epd_pool->list, list) {
			list_del(&lpfc_ncmd->list);
			epd_pool->count--;
			break;
		}
	}
	spin_unlock_irqrestore(&epd_pool->lock, iflag);

	return lpfc_ncmd;
}
/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If private free xri count is empty, move some XRIs from public to
 *    private pool.
 * 2. Get one XRI from the private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from the expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}
static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}
/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
 * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0,
 * it removes an IO buffer from the head of the @hwqid io_buf_list and returns
 * it to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
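
/*
 * When XRI rebalancing is off, the code above uses the usual two-list
 * scheme: consumers drain lpfc_io_buf_list_get under io_buf_list_get_lock,
 * and only when that list runs dry is io_buf_list_put_lock taken (nested
 * inside the get lock) to splice the entire put list over in one shot, so
 * allocation and release mostly run on separate locks. The last
 * LPFC_NVME_EXPEDITE_XRICNT buffers are held in reserve and handed out
 * only to expedite (urgent) requests.
 */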
/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}
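
/*
 * Note on the allocation path above: when the per-hdwq free list is empty,
 * hdwq_lock is released while kmalloc_node()/dma_pool_alloc() run (both use
 * GFP_ATOMIC), which keeps the spinlock hold time short, and is re-acquired
 * before the fresh chunk is linked onto dma_sgl_xtra_list. list_last_entry()
 * then picks up whichever chunk was just appended, whether it came from the
 * pool or from a new allocation.
 */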
/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
 *
 * Return:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_sgl_xtra_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}
/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of the hdwq SGL chunk pool.
 *
 * Return:
 *   None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->sgl_list;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free sgl pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list, list_node) {
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      list_entry->dma_sgl,
			      list_entry->dma_phys_sgl);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}
/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
					       GFP_ATOMIC,
					       &tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
				sizeof(struct fcp_cmnd));

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}
/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into the executing CPU's CMD/RSP pool.
 *
 * Return:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}
/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
 *
 * Return:
 *   None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}