/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}
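
/*
 * Illustrative sketch (not part of the driver): the templates built above
 * are meant to seed per-IO WQEs so that only the variable words have to be
 * written at submission time. A caller would typically do something like
 * the following, where "wqe128", "xfer_len" and "iotag" are hypothetical
 * caller-side names:
 *
 *	union lpfc_wqe128 wqe128;
 *
 *	memcpy(&wqe128, &lpfc_iread_cmd_template, sizeof(wqe128));
 *	wqe128.fcp_iread.total_xfer_len = xfer_len;            (Word 4)
 *	bf_set(wqe_reqtag, &wqe128.fcp_iread.wqe_com, iotag);  (Word 9)
 *	(Words 0-2 BDE, Word 6 xri/ctxt tags and Word 8 abort tag are
 *	 likewise filled per IO before the WQE is posted.)
 */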
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of words required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
					q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
					q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			       q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			       q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
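
/*
 * Illustrative sketch (not part of the driver): the usual pattern is to
 * report batches of consumed EQEs without re-arming while still polling,
 * and to re-arm only once processing for this interrupt is finished:
 *
 *	lpfc_sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_NOARM);
 *	consumed = 0;
 *	(keep polling the EQ)
 *	lpfc_sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM);
 *
 * lpfc_sli4_process_eq() further down follows exactly this shape.
 */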
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}
static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}
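
/*
 * Illustrative sketch (not part of the driver): lpfc_sli4_eq_get() and
 * __lpfc_sli4_consume_eqe() are intended to be used as a pair when
 * draining an EQ, e.g.:
 *
 *	eqe = lpfc_sli4_eq_get(eq);
 *	while (eqe) {
 *		(handle the event)
 *		__lpfc_sli4_consume_eqe(phba, eq, eqe);
 *		eqe = lpfc_sli4_eq_get(eq);
 *	}
 *
 * For autovalid EQs (pc_sli4_params.eqav) the valid bit is not cleared
 * per entry; instead qe_valid flips polarity each time host_index wraps
 * back to zero, which is what the toggle above implements.
 */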
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
							LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}
static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     uint8_t rearm)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}
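
/*
 * Illustrative sketch (not part of the driver): interrupt and polling
 * paths call lpfc_sli4_process_eq() and decide on re-arming. An ISR-style
 * caller might simply do:
 *
 *	count = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_REARM);
 *
 * while a polling loop may pass LPFC_QUEUE_NOARM and re-arm later. The
 * cmpxchg() on queue_claimed above keeps two such callers from processing
 * the same EQ concurrently.
 */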
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return cqe;
}
static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}
/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		      uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}
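
/*
 * Illustrative sketch (not part of the driver): receive buffers are always
 * posted as a header/data pair to the two queues of an RQ set. Assuming
 * the lpfc_rqe address_hi/address_lo layout and caller-side names "rqb",
 * "hrq" and "drq", a posting path looks roughly like:
 *
 *	hrqe.address_lo = putPaddrLow(rqb->hbuf.phys);
 *	hrqe.address_hi = putPaddrHigh(rqb->hbuf.phys);
 *	drqe.address_lo = putPaddrLow(rqb->dbuf.phys);
 *	drqe.address_hi = putPaddrHigh(rqb->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *	if (rc < 0)
 *		(requeue the buffer pair and try again later)
 */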
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq * iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 * If ndlp is NULL Remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 = rrq activated for this xri
 *         < 0 = No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
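
/*
 * Illustrative sketch (not part of the driver): the RRQ bookkeeping above
 * is driven from exchange-abort handling. Roughly:
 *
 *	if (lpfc_set_rrq_active(phba, ndlp, xri, rxid, send_rrq) == 0)
 *		(the xri stays quarantined until R_A_TOV expires)
 *
 * Later, lpfc_handle_rrq_active() either sends the RRQ ELS or calls
 * lpfc_clr_rrq_active() to free the tracking structure, and
 * lpfc_test_rrq_active() keeps the xri from being reused in the meantime.
 */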
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage. This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful and returns a pointer to the newly allocated sglq
 * object, else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	struct lpfc_sli_ring *pring = NULL;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_NVME_LS)
		pring = phba->sli4_hba.nvmels_wq->pring;
	else
		pring = lpfc_phba_elsring(phba);

	lockdep_assert_held(&pring->ring_lock);

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_io_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
		    ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq * iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
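
/*
 * Illustrative sketch (not part of the driver): callers that are not
 * already under the hbalock use this wrapper and hand the iocb back with
 * lpfc_sli_release_iocbq() when done:
 *
 *	struct lpfc_iocbq *iocbq;
 *
 *	iocbq = lpfc_sli_get_iocbq(phba);
 *	if (!iocbq)
 *		return -ENOMEM;
 *	(build and issue the command)
 *	lpfc_sli_release_iocbq(phba, iocbq);
 */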
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sqlq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->iocb_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		pring = phba->sli4_hba.els_wq->pring;
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->iocb_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl) {
			if (piocb->iocb_flag & LPFC_IO_NVME)
				lpfc_nvme_cancel_iocb(phba, piocb);
			else
				lpfc_sli_release_iocbq(phba, piocb);
		} else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
	case CMD_SEND_FRAME:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
				__func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
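
/*
 * Illustrative sketch (not part of the driver): the ring event handlers
 * use the returned type to pick a completion path, roughly:
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
 *	case LPFC_SOL_IOCB:   (match against txcmplq and complete)
 *	case LPFC_UNSOL_IOCB: (hand the sequence to the ULP)
 *	case LPFC_ABORT_IOCB: (complete the aborted command)
 *	default:              (log and drop)
 *	}
 *
 * The CMD_IOCB_MASK usage here is an assumption for illustration only.
 */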
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * The driver calls this function with the hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);

	BUG_ON(!piocb);

	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;
	pring->txcmplq_cnt++;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		BUG_ON(!piocb->vport);
		if (!(piocb->vport->load_flag & FC_UNLOADING))
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies +
				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;

	lockdep_assert_held(&phba->hbalock);

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if(++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if(++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called to post a new iocb to the firmware. This
 * function copies the new iocb to the ring iocb slot and updates the
 * ring pointers. It adds the new iocb to txcmplq if there is
 * a completion callback for this iocb, else the function will free the
 * iocb object. The hbalock is asserted held in the code path calling
 * this routine.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
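
/*
 * Illustrative sketch (not part of the driver) of the two completion models
 * that lpfc_sli_submit_iocb() handles, assuming a hypothetical caller that
 * already holds hbalock and has reserved a ring slot with
 * lpfc_sli_next_iocb_slot().  my_els_cmpl is a hypothetical handler.
 *
 *	nextiocb->iocb_cmpl = my_els_cmpl;
 *	lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 *	    -- iocbq is tracked on txcmplq until the response arrives
 *
 *	nextiocb->iocb_cmpl = NULL;
 *	lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 *	    -- fire-and-forget; the iocbq is released immediately
 */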
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != LPFC_FCP_RING ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}
}
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	lockdep_assert_held(&phba->hbalock);
	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function returns
 * zero if it successfully posts the buffer, else it returns an error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	lockdep_assert_held(&phba->hbalock);
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
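
/*
 * The wrapper above dispatches through a per-adapter function pointer that
 * is filled in at setup time with the SLI3 or SLI4 variant.  A rough sketch
 * of that wiring is shown below; where exactly the driver performs it is
 * outside this excerpt, so treat this as an illustration of the dispatch
 * pattern rather than a copy of the setup code.
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
 *	else
 *		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
 */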
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function returns zero if
 * it successfully posts the buffer, else it returns an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	lockdep_assert_held(&phba->hbalock);
	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
				/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
				/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;

	if (hbqno != LPFC_ELS_HBQ)
		return 1;
	hrq = phba->sli4_hba.hdr_rq;
	drq = phba->sli4_hba.dat_rq;

	lockdep_assert_held(&phba->hbalock);
	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = (rc | (hbqno << 16));
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
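
/*
 * Illustrative note on the tag layout used above: the low 16 bits carry the
 * RQE index returned by lpfc_sli4_rq_put() (or the buffer count at posting
 * time on the SLI3 path), and the high 16 bits carry the HBQ number, which
 * is how lpfc_sli_hbqbuf_find() later recovers both halves.  Hypothetical
 * helper expressions for clarity only:
 *
 *	tag   = (uint32_t)index | (hbqno << 16);
 *	hbqno = tag >> 16;
 *	index = tag & 0xffff;
 */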
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.ring_mask = (1 << LPFC_ELS_RING),
};

struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
};
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
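
/*
 * Worked example of the clamp at the top of lpfc_sli_hbqbuf_fill_hbqs():
 * with entry_count = 256, buffer_count = 240 and a request of count = 40,
 * buffer_count + count (280) exceeds entry_count, so the request is trimmed
 * to 256 - 240 = 16 and the HBQ is never over-posted.  The numbers are
 * illustrative and not taken from any specific configuration.
 */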
/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count);
}
/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count);
}
/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: pointer to the hbq buffer list.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
/**
 * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
 * @phba: Pointer to HBA context object.
 * @hrq: pointer to the hbq to get a buffer from.
 *
 * This function removes the first RQ buffer on an RQ buffer list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct rqb_dmabuf *
lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
{
	struct lpfc_dmabuf *h_buf;
	struct lpfc_rqb *rqbp;

	rqbp = hrq->rqbp;
	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
			 struct lpfc_dmabuf, list);
	if (!h_buf)
		return NULL;
	rqbp->buffer_count--;
	return container_of(h_buf, struct rqb_dmabuf, hbuf);
}
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
 * otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with hbalock held. This function gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}
/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_LNK_STAT:
	case MBX_UNREG_LOGIN:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_UPDATE_CFG:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_UNREG_FCFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up thread waiting on the wait queue pointed by context1
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	unsigned long drvr_flag;
	struct completion *pmbox_done;

	/*
	 * If pmbox_done is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pmbox_done = (struct completion *)pmboxq->context3;
	if (pmbox_done)
		complete(pmbox_done);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
}
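
/*
 * Sketch of the other half of this handshake as seen from the waiting
 * thread.  This is a simplified, hypothetical rendering of the wait side;
 * the real issue path lives in lpfc_sli_issue_mbox_wait().
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->context3 = &mbox_done;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_for_completion_timeout(&mbox_done,
 *				    msecs_to_jiffies(timeout * 1000));
 */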
static void
__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	unsigned long iflags;

	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
		spin_lock_irqsave(&ndlp->lock, iflags);
		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
		spin_unlock_irqrestore(&ndlp->lock, iflags);
	}
	ndlp->nlp_flag &= ~NLP_UNREG_INP;
}
2587 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2588 * @phba: Pointer to HBA context object.
2589 * @pmb: Pointer to mailbox object.
2591 * This function is the default mailbox completion handler. It
2592 * frees the memory resources associated with the completed mailbox
2593 * command. If the completed command is a REG_LOGIN mailbox command,
2594 * this function will issue a UREG_LOGIN to re-claim the RPI.
2597 lpfc_sli_def_mbox_cmpl(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2599 struct lpfc_vport
*vport
= pmb
->vport
;
2600 struct lpfc_dmabuf
*mp
;
2601 struct lpfc_nodelist
*ndlp
;
2602 struct Scsi_Host
*shost
;
2606 mp
= (struct lpfc_dmabuf
*)(pmb
->ctx_buf
);
2609 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
2614 * If a REG_LOGIN succeeded after node is destroyed or node
2615 * is in re-discovery driver need to cleanup the RPI.
2617 if (!(phba
->pport
->load_flag
& FC_UNLOADING
) &&
2618 pmb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
&&
2619 !pmb
->u
.mb
.mbxStatus
) {
2620 rpi
= pmb
->u
.mb
.un
.varWords
[0];
2621 vpi
= pmb
->u
.mb
.un
.varRegLogin
.vpi
;
2622 if (phba
->sli_rev
== LPFC_SLI_REV4
)
2623 vpi
-= phba
->sli4_hba
.max_cfg_param
.vpi_base
;
2624 lpfc_unreg_login(phba
, vpi
, rpi
, pmb
);
2626 pmb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
2627 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
2628 if (rc
!= MBX_NOT_FINISHED
)
2632 if ((pmb
->u
.mb
.mbxCommand
== MBX_REG_VPI
) &&
2633 !(phba
->pport
->load_flag
& FC_UNLOADING
) &&
2634 !pmb
->u
.mb
.mbxStatus
) {
2635 shost
= lpfc_shost_from_vport(vport
);
2636 spin_lock_irq(shost
->host_lock
);
2637 vport
->vpi_state
|= LPFC_VPI_REGISTERED
;
2638 vport
->fc_flag
&= ~FC_VPORT_NEEDS_REG_VPI
;
2639 spin_unlock_irq(shost
->host_lock
);
2642 if (pmb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) {
2643 ndlp
= (struct lpfc_nodelist
*)pmb
->ctx_ndlp
;
2645 pmb
->ctx_buf
= NULL
;
2646 pmb
->ctx_ndlp
= NULL
;
2649 if (pmb
->u
.mb
.mbxCommand
== MBX_UNREG_LOGIN
) {
2650 ndlp
= (struct lpfc_nodelist
*)pmb
->ctx_ndlp
;
2652 /* Check to see if there are any deferred events to process */
2656 KERN_INFO
, LOG_MBOX
| LOG_DISCOVERY
,
2657 "1438 UNREG cmpl deferred mbox x%x "
2658 "on NPort x%x Data: x%x x%x %px x%x x%x\n",
2659 ndlp
->nlp_rpi
, ndlp
->nlp_DID
,
2660 ndlp
->nlp_flag
, ndlp
->nlp_defer_did
,
2661 ndlp
, vport
->load_flag
, kref_read(&ndlp
->kref
));
2663 if ((ndlp
->nlp_flag
& NLP_UNREG_INP
) &&
2664 (ndlp
->nlp_defer_did
!= NLP_EVT_NOTHING_PENDING
)) {
2665 ndlp
->nlp_flag
&= ~NLP_UNREG_INP
;
2666 ndlp
->nlp_defer_did
= NLP_EVT_NOTHING_PENDING
;
2667 lpfc_issue_els_plogi(vport
, ndlp
->nlp_DID
, 0);
2669 __lpfc_sli_rpi_release(vport
, ndlp
);
2672 /* The unreg_login mailbox is complete and had a
2673 * reference that has to be released. The PLOGI
2677 pmb
->ctx_ndlp
= NULL
;
2681 /* Check security permission status on INIT_LINK mailbox command */
2682 if ((pmb
->u
.mb
.mbxCommand
== MBX_INIT_LINK
) &&
2683 (pmb
->u
.mb
.mbxStatus
== MBXERR_SEC_NO_PERMISSION
))
2684 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
2685 "2860 SLI authentication is required "
2686 "for INIT_LINK but has not done yet\n");
2688 if (bf_get(lpfc_mqe_command
, &pmb
->u
.mqe
) == MBX_SLI4_CONFIG
)
2689 lpfc_sli4_mbox_cmd_free(phba
, pmb
);
2691 mempool_free(pmb
, phba
->mbox_mem_pool
);
2694 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2695 * @phba: Pointer to HBA context object.
2696 * @pmb: Pointer to mailbox object.
2698 * This function is the unreg rpi mailbox completion handler. It
2699 * frees the memory resources associated with the completed mailbox
2700 * command. An additional reference is put on the ndlp to prevent
2701 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2702 * the unreg mailbox command completes, this routine puts the
2707 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*pmb
)
2709 struct lpfc_vport
*vport
= pmb
->vport
;
2710 struct lpfc_nodelist
*ndlp
;
2712 ndlp
= pmb
->ctx_ndlp
;
2713 if (pmb
->u
.mb
.mbxCommand
== MBX_UNREG_LOGIN
) {
2714 if (phba
->sli_rev
== LPFC_SLI_REV4
&&
2715 (bf_get(lpfc_sli_intf_if_type
,
2716 &phba
->sli4_hba
.sli_intf
) >=
2717 LPFC_SLI_INTF_IF_TYPE_2
)) {
2720 vport
, KERN_INFO
, LOG_MBOX
| LOG_SLI
,
2721 "0010 UNREG_LOGIN vpi:%x "
2722 "rpi:%x DID:%x defer x%x flg x%x "
2724 vport
->vpi
, ndlp
->nlp_rpi
,
2725 ndlp
->nlp_DID
, ndlp
->nlp_defer_did
,
2728 ndlp
->nlp_flag
&= ~NLP_LOGO_ACC
;
2730 /* Check to see if there are any deferred
2733 if ((ndlp
->nlp_flag
& NLP_UNREG_INP
) &&
2734 (ndlp
->nlp_defer_did
!=
2735 NLP_EVT_NOTHING_PENDING
)) {
2737 vport
, KERN_INFO
, LOG_DISCOVERY
,
2738 "4111 UNREG cmpl deferred "
2740 "NPort x%x Data: x%x x%px\n",
2741 ndlp
->nlp_rpi
, ndlp
->nlp_DID
,
2742 ndlp
->nlp_defer_did
, ndlp
);
2743 ndlp
->nlp_flag
&= ~NLP_UNREG_INP
;
2744 ndlp
->nlp_defer_did
=
2745 NLP_EVT_NOTHING_PENDING
;
2746 lpfc_issue_els_plogi(
2747 vport
, ndlp
->nlp_DID
, 0);
2749 __lpfc_sli_rpi_release(vport
, ndlp
);
2757 mempool_free(pmb
, phba
->mbox_mem_pool
);
2761 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
2762 * @phba: Pointer to HBA context object.
2764 * This function is called with no lock held. This function processes all
2765 * the completed mailbox commands and gives it to upper layers. The interrupt
2766 * service routine processes mailbox completion interrupt and adds completed
2767 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
2768 * Worker thread call lpfc_sli_handle_mb_event, which will return the
2769 * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
2770 * function returns the mailbox commands to the upper layer by calling the
2771 * completion handler function of each mailbox.
2774 lpfc_sli_handle_mb_event(struct lpfc_hba
*phba
)
2781 phba
->sli
.slistat
.mbox_event
++;
2783 /* Get all completed mailboxe buffers into the cmplq */
2784 spin_lock_irq(&phba
->hbalock
);
2785 list_splice_init(&phba
->sli
.mboxq_cmpl
, &cmplq
);
2786 spin_unlock_irq(&phba
->hbalock
);
2788 /* Get a Mailbox buffer to setup mailbox commands for callback */
2790 list_remove_head(&cmplq
, pmb
, LPFC_MBOXQ_t
, list
);
2796 if (pmbox
->mbxCommand
!= MBX_HEARTBEAT
) {
2798 lpfc_debugfs_disc_trc(pmb
->vport
,
2799 LPFC_DISC_TRC_MBOX_VPORT
,
2800 "MBOX cmpl vport: cmd:x%x mb:x%x x%x",
2801 (uint32_t)pmbox
->mbxCommand
,
2802 pmbox
->un
.varWords
[0],
2803 pmbox
->un
.varWords
[1]);
2806 lpfc_debugfs_disc_trc(phba
->pport
,
2808 "MBOX cmpl: cmd:x%x mb:x%x x%x",
2809 (uint32_t)pmbox
->mbxCommand
,
2810 pmbox
->un
.varWords
[0],
2811 pmbox
->un
.varWords
[1]);
2816 * It is a fatal error if unknown mbox command completion.
2818 if (lpfc_sli_chk_mbx_command(pmbox
->mbxCommand
) ==
2820 /* Unknown mailbox command compl */
2821 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
2822 "(%d):0323 Unknown Mailbox command "
2823 "x%x (x%x/x%x) Cmpl\n",
2824 pmb
->vport
? pmb
->vport
->vpi
:
2827 lpfc_sli_config_mbox_subsys_get(phba
,
2829 lpfc_sli_config_mbox_opcode_get(phba
,
2831 phba
->link_state
= LPFC_HBA_ERROR
;
2832 phba
->work_hs
= HS_FFER3
;
2833 lpfc_handle_eratt(phba
);
2837 if (pmbox
->mbxStatus
) {
2838 phba
->sli
.slistat
.mbox_stat_err
++;
2839 if (pmbox
->mbxStatus
== MBXERR_NO_RESOURCES
) {
2840 /* Mbox cmd cmpl error - RETRYing */
2841 lpfc_printf_log(phba
, KERN_INFO
,
2843 "(%d):0305 Mbox cmd cmpl "
2844 "error - RETRYing Data: x%x "
2845 "(x%x/x%x) x%x x%x x%x\n",
2846 pmb
->vport
? pmb
->vport
->vpi
:
2849 lpfc_sli_config_mbox_subsys_get(phba
,
2851 lpfc_sli_config_mbox_opcode_get(phba
,
2854 pmbox
->un
.varWords
[0],
2855 pmb
->vport
? pmb
->vport
->port_state
:
2856 LPFC_VPORT_UNKNOWN
);
2857 pmbox
->mbxStatus
= 0;
2858 pmbox
->mbxOwner
= OWN_HOST
;
2859 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
2860 if (rc
!= MBX_NOT_FINISHED
)
2865 /* Mailbox cmd <cmd> Cmpl <cmpl> */
2866 lpfc_printf_log(phba
, KERN_INFO
, LOG_MBOX
| LOG_SLI
,
2867 "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
2868 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
2870 pmb
->vport
? pmb
->vport
->vpi
: 0,
2872 lpfc_sli_config_mbox_subsys_get(phba
, pmb
),
2873 lpfc_sli_config_mbox_opcode_get(phba
, pmb
),
2875 *((uint32_t *) pmbox
),
2876 pmbox
->un
.varWords
[0],
2877 pmbox
->un
.varWords
[1],
2878 pmbox
->un
.varWords
[2],
2879 pmbox
->un
.varWords
[3],
2880 pmbox
->un
.varWords
[4],
2881 pmbox
->un
.varWords
[5],
2882 pmbox
->un
.varWords
[6],
2883 pmbox
->un
.varWords
[7],
2884 pmbox
->un
.varWords
[8],
2885 pmbox
->un
.varWords
[9],
2886 pmbox
->un
.varWords
[10]);
2889 pmb
->mbox_cmpl(phba
,pmb
);
/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer is posted for a particular exchange and
 * the function will return the buffer without replacing the buffer.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}
2922 * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
2923 * containing a NVME LS request.
2924 * @phba: pointer to lpfc hba data structure.
2925 * @piocb: pointer to the iocbq struct representing the sequence starting
2928 * This routine initially validates the NVME LS, validates there is a login
2929 * with the port that sent the LS, and then calls the appropriate nvme host
2930 * or target LS request handler.
2933 lpfc_nvme_unsol_ls_handler(struct lpfc_hba
*phba
, struct lpfc_iocbq
*piocb
)
2935 struct lpfc_nodelist
*ndlp
;
2936 struct lpfc_dmabuf
*d_buf
;
2937 struct hbq_dmabuf
*nvmebuf
;
2938 struct fc_frame_header
*fc_hdr
;
2939 struct lpfc_async_xchg_ctx
*axchg
= NULL
;
2940 char *failwhy
= NULL
;
2941 uint32_t oxid
, sid
, did
, fctl
, size
;
2944 d_buf
= piocb
->context2
;
2946 nvmebuf
= container_of(d_buf
, struct hbq_dmabuf
, dbuf
);
2947 fc_hdr
= nvmebuf
->hbuf
.virt
;
2948 oxid
= be16_to_cpu(fc_hdr
->fh_ox_id
);
2949 sid
= sli4_sid_from_fc_hdr(fc_hdr
);
2950 did
= sli4_did_from_fc_hdr(fc_hdr
);
2951 fctl
= (fc_hdr
->fh_f_ctl
[0] << 16 |
2952 fc_hdr
->fh_f_ctl
[1] << 8 |
2953 fc_hdr
->fh_f_ctl
[2]);
2954 size
= bf_get(lpfc_rcqe_length
, &nvmebuf
->cq_event
.cqe
.rcqe_cmpl
);
2956 lpfc_nvmeio_data(phba
, "NVME LS RCV: xri x%x sz %d from %06x\n",
2959 if (phba
->pport
->load_flag
& FC_UNLOADING
) {
2960 failwhy
= "Driver Unloading";
2961 } else if (!(phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
)) {
2962 failwhy
= "NVME FC4 Disabled";
2963 } else if (!phba
->nvmet_support
&& !phba
->pport
->localport
) {
2964 failwhy
= "No Localport";
2965 } else if (phba
->nvmet_support
&& !phba
->targetport
) {
2966 failwhy
= "No Targetport";
2967 } else if (unlikely(fc_hdr
->fh_r_ctl
!= FC_RCTL_ELS4_REQ
)) {
2968 failwhy
= "Bad NVME LS R_CTL";
2969 } else if (unlikely((fctl
& 0x00FF0000) !=
2970 (FC_FC_FIRST_SEQ
| FC_FC_END_SEQ
| FC_FC_SEQ_INIT
))) {
2971 failwhy
= "Bad NVME LS F_CTL";
2973 axchg
= kzalloc(sizeof(*axchg
), GFP_ATOMIC
);
2975 failwhy
= "No CTX memory";
2978 if (unlikely(failwhy
)) {
2979 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
2980 "6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
2981 sid
, oxid
, failwhy
);
2985 /* validate the source of the LS is logged in */
2986 ndlp
= lpfc_findnode_did(phba
->pport
, sid
);
2988 ((ndlp
->nlp_state
!= NLP_STE_UNMAPPED_NODE
) &&
2989 (ndlp
->nlp_state
!= NLP_STE_MAPPED_NODE
))) {
2990 lpfc_printf_log(phba
, KERN_ERR
, LOG_NVME_DISC
,
2991 "6216 NVME Unsol rcv: No ndlp: "
2992 "NPort_ID x%x oxid x%x\n",
3003 axchg
->state
= LPFC_NVME_STE_LS_RCV
;
3004 axchg
->entry_cnt
= 1;
3005 axchg
->rqb_buffer
= (void *)nvmebuf
;
3006 axchg
->hdwq
= &phba
->sli4_hba
.hdwq
[0];
3007 axchg
->payload
= nvmebuf
->dbuf
.virt
;
3008 INIT_LIST_HEAD(&axchg
->list
);
3010 if (phba
->nvmet_support
)
3011 ret
= lpfc_nvmet_handle_lsreq(phba
, axchg
);
3013 ret
= lpfc_nvme_handle_lsreq(phba
, axchg
);
3015 /* if zero, LS was successfully handled. If non-zero, LS not handled */
3019 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
3020 "6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3021 "NVMe%s handler failed %d\n",
3023 (phba
->nvmet_support
) ? "T" : "I", ret
);
3027 /* recycle receive buffer */
3028 lpfc_in_buf_free(phba
, &nvmebuf
->dbuf
);
3030 /* If start of new exchange, abort it */
3031 if (axchg
&& (fctl
& FC_FC_FIRST_SEQ
&& !(fctl
& FC_FC_EX_CTX
)))
3032 ret
= lpfc_nvme_unsol_ls_issue_abort(phba
, axchg
, sid
, oxid
);
/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting
 *         frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	switch (fch_type) {
	case FC_TYPE_NVME:
		lpfc_nvme_unsol_ls_handler(phba, saveq);
		return 1;
	default:
		break;
	}

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		return 1;
	}

	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}
3087 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3088 * @phba: Pointer to HBA context object.
3089 * @pring: Pointer to driver SLI ring object.
3090 * @saveq: Pointer to the unsolicited iocb.
3092 * This function is called with no lock held by the ring event handler
3093 * when there is an unsolicited iocb posted to the response ring by the
3094 * firmware. This function gets the buffer associated with the iocbs
3095 * and calls the event handler for the ring. This function handles both
3096 * qring buffers and hbq buffers.
3097 * When the function returns 1 the caller can free the iocb object otherwise
3098 * upper layer functions will free the iocb objects.
3101 lpfc_sli_process_unsol_iocb(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
3102 struct lpfc_iocbq
*saveq
)
3106 uint32_t Rctl
, Type
;
3107 struct lpfc_iocbq
*iocbq
;
3108 struct lpfc_dmabuf
*dmzbuf
;
3110 irsp
= &(saveq
->iocb
);
3112 if (irsp
->ulpCommand
== CMD_ASYNC_STATUS
) {
3113 if (pring
->lpfc_sli_rcv_async_status
)
3114 pring
->lpfc_sli_rcv_async_status(phba
, pring
, saveq
);
3116 lpfc_printf_log(phba
,
3119 "0316 Ring %d handler: unexpected "
3120 "ASYNC_STATUS iocb received evt_code "
3123 irsp
->un
.asyncstat
.evt_code
);
3127 if ((irsp
->ulpCommand
== CMD_IOCB_RET_XRI64_CX
) &&
3128 (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
)) {
3129 if (irsp
->ulpBdeCount
> 0) {
3130 dmzbuf
= lpfc_sli_get_buff(phba
, pring
,
3131 irsp
->un
.ulpWord
[3]);
3132 lpfc_in_buf_free(phba
, dmzbuf
);
3135 if (irsp
->ulpBdeCount
> 1) {
3136 dmzbuf
= lpfc_sli_get_buff(phba
, pring
,
3137 irsp
->unsli3
.sli3Words
[3]);
3138 lpfc_in_buf_free(phba
, dmzbuf
);
3141 if (irsp
->ulpBdeCount
> 2) {
3142 dmzbuf
= lpfc_sli_get_buff(phba
, pring
,
3143 irsp
->unsli3
.sli3Words
[7]);
3144 lpfc_in_buf_free(phba
, dmzbuf
);
3150 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
3151 if (irsp
->ulpBdeCount
!= 0) {
3152 saveq
->context2
= lpfc_sli_get_buff(phba
, pring
,
3153 irsp
->un
.ulpWord
[3]);
3154 if (!saveq
->context2
)
3155 lpfc_printf_log(phba
,
3158 "0341 Ring %d Cannot find buffer for "
3159 "an unsolicited iocb. tag 0x%x\n",
3161 irsp
->un
.ulpWord
[3]);
3163 if (irsp
->ulpBdeCount
== 2) {
3164 saveq
->context3
= lpfc_sli_get_buff(phba
, pring
,
3165 irsp
->unsli3
.sli3Words
[7]);
3166 if (!saveq
->context3
)
3167 lpfc_printf_log(phba
,
3170 "0342 Ring %d Cannot find buffer for an"
3171 " unsolicited iocb. tag 0x%x\n",
3173 irsp
->unsli3
.sli3Words
[7]);
3175 list_for_each_entry(iocbq
, &saveq
->list
, list
) {
3176 irsp
= &(iocbq
->iocb
);
3177 if (irsp
->ulpBdeCount
!= 0) {
3178 iocbq
->context2
= lpfc_sli_get_buff(phba
, pring
,
3179 irsp
->un
.ulpWord
[3]);
3180 if (!iocbq
->context2
)
3181 lpfc_printf_log(phba
,
3184 "0343 Ring %d Cannot find "
3185 "buffer for an unsolicited iocb"
3186 ". tag 0x%x\n", pring
->ringno
,
3187 irsp
->un
.ulpWord
[3]);
3189 if (irsp
->ulpBdeCount
== 2) {
3190 iocbq
->context3
= lpfc_sli_get_buff(phba
, pring
,
3191 irsp
->unsli3
.sli3Words
[7]);
3192 if (!iocbq
->context3
)
3193 lpfc_printf_log(phba
,
3196 "0344 Ring %d Cannot find "
3197 "buffer for an unsolicited "
3200 irsp
->unsli3
.sli3Words
[7]);
3204 if (irsp
->ulpBdeCount
!= 0 &&
3205 (irsp
->ulpCommand
== CMD_IOCB_RCV_CONT64_CX
||
3206 irsp
->ulpStatus
== IOSTAT_INTERMED_RSP
)) {
3209 /* search continue save q for same XRI */
3210 list_for_each_entry(iocbq
, &pring
->iocb_continue_saveq
, clist
) {
3211 if (iocbq
->iocb
.unsli3
.rcvsli3
.ox_id
==
3212 saveq
->iocb
.unsli3
.rcvsli3
.ox_id
) {
3213 list_add_tail(&saveq
->list
, &iocbq
->list
);
3219 list_add_tail(&saveq
->clist
,
3220 &pring
->iocb_continue_saveq
);
3221 if (saveq
->iocb
.ulpStatus
!= IOSTAT_INTERMED_RSP
) {
3222 list_del_init(&iocbq
->clist
);
3224 irsp
= &(saveq
->iocb
);
3228 if ((irsp
->ulpCommand
== CMD_RCV_ELS_REQ64_CX
) ||
3229 (irsp
->ulpCommand
== CMD_RCV_ELS_REQ_CX
) ||
3230 (irsp
->ulpCommand
== CMD_IOCB_RCV_ELS64_CX
)) {
3231 Rctl
= FC_RCTL_ELS_REQ
;
3234 w5p
= (WORD5
*)&(saveq
->iocb
.un
.ulpWord
[5]);
3235 Rctl
= w5p
->hcsw
.Rctl
;
3236 Type
= w5p
->hcsw
.Type
;
3238 /* Firmware Workaround */
3239 if ((Rctl
== 0) && (pring
->ringno
== LPFC_ELS_RING
) &&
3240 (irsp
->ulpCommand
== CMD_RCV_SEQUENCE64_CX
||
3241 irsp
->ulpCommand
== CMD_IOCB_RCV_SEQ64_CX
)) {
3242 Rctl
= FC_RCTL_ELS_REQ
;
3244 w5p
->hcsw
.Rctl
= Rctl
;
3245 w5p
->hcsw
.Type
= Type
;
3249 if (!lpfc_complete_unsol_iocb(phba
, pring
, saveq
, Rctl
, Type
))
3250 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
3251 "0313 Ring %d handler: unexpected Rctl x%x "
3252 "Type x%x received\n",
3253 pring
->ringno
, Rctl
, Type
);
/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. The driver calls this function with the hbalock held
 * for SLI3 ports or the ring lock held for SLI4 ports.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	u16 iotag;
	spinlock_t *temp_lock = NULL;
	unsigned long iflag = 0;

	if (phba->sli_rev == LPFC_SLI_REV4)
		temp_lock = &pring->ring_lock;
	else
		temp_lock = &phba->hbalock;

	spin_lock_irqsave(temp_lock, iflag);
	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			pring->txcmplq_cnt--;
			spin_unlock_irqrestore(temp_lock, iflag);
			return cmd_iocb;
		}
	}

	spin_unlock_irqrestore(temp_lock, iflag);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag. The driver calls this function with
 * the ring lock held because this function is an SLI4 port only helper.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	spinlock_t *temp_lock = NULL;
	unsigned long iflag = 0;

	if (phba->sli_rev == LPFC_SLI_REV4)
		temp_lock = &pring->ring_lock;
	else
		temp_lock = &phba->hbalock;

	spin_lock_irqsave(temp_lock, iflag);
	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			pring->txcmplq_cnt--;
			spin_unlock_irqrestore(temp_lock, iflag);
			return cmd_iocb;
		}
	}

	spin_unlock_irqrestore(temp_lock, iflag);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0372 iotag x%x lookup error: max iotag (x%x) "
			"iocb_flag x%x\n",
			iotag, phba->sli.last_iotag,
			cmd_iocb ? cmd_iocb->iocb_flag : 0xffff);
	return NULL;
}
3358 * lpfc_sli_process_sol_iocb - process solicited iocb completion
3359 * @phba: Pointer to HBA context object.
3360 * @pring: Pointer to driver SLI ring object.
3361 * @saveq: Pointer to the response iocb to be processed.
3363 * This function is called by the ring event handler for non-fcp
3364 * rings when there is a new response iocb in the response ring.
3365 * The caller is not required to hold any locks. This function
3366 * gets the command iocb associated with the response iocb and
3367 * calls the completion handler for the command iocb. If there
3368 * is no completion handler, the function will free the resources
3369 * associated with command iocb. If the response iocb is for
3370 * an already aborted command iocb, the status of the completion
3371 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3372 * This function always returns 1.
3375 lpfc_sli_process_sol_iocb(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
,
3376 struct lpfc_iocbq
*saveq
)
3378 struct lpfc_iocbq
*cmdiocbp
;
3380 unsigned long iflag
;
3382 cmdiocbp
= lpfc_sli_iocbq_lookup(phba
, pring
, saveq
);
3384 if (cmdiocbp
->iocb_cmpl
) {
3386 * If an ELS command failed send an event to mgmt
3389 if (saveq
->iocb
.ulpStatus
&&
3390 (pring
->ringno
== LPFC_ELS_RING
) &&
3391 (cmdiocbp
->iocb
.ulpCommand
==
3392 CMD_ELS_REQUEST64_CR
))
3393 lpfc_send_els_failure_event(phba
,
3397 * Post all ELS completions to the worker thread.
3398 * All other are passed to the completion callback.
3400 if (pring
->ringno
== LPFC_ELS_RING
) {
3401 if ((phba
->sli_rev
< LPFC_SLI_REV4
) &&
3402 (cmdiocbp
->iocb_flag
&
3403 LPFC_DRIVER_ABORTED
)) {
3404 spin_lock_irqsave(&phba
->hbalock
,
3406 cmdiocbp
->iocb_flag
&=
3407 ~LPFC_DRIVER_ABORTED
;
3408 spin_unlock_irqrestore(&phba
->hbalock
,
3410 saveq
->iocb
.ulpStatus
=
3411 IOSTAT_LOCAL_REJECT
;
3412 saveq
->iocb
.un
.ulpWord
[4] =
3415 /* Firmware could still be in progress
3416 * of DMAing payload, so don't free data
3417 * buffer till after a hbeat.
3419 spin_lock_irqsave(&phba
->hbalock
,
3421 saveq
->iocb_flag
|= LPFC_DELAY_MEM_FREE
;
3422 spin_unlock_irqrestore(&phba
->hbalock
,
3425 if (phba
->sli_rev
== LPFC_SLI_REV4
) {
3426 if (saveq
->iocb_flag
&
3427 LPFC_EXCHANGE_BUSY
) {
3428 /* Set cmdiocb flag for the
3429 * exchange busy so sgl (xri)
3430 * will not be released until
3431 * the abort xri is received
3435 &phba
->hbalock
, iflag
);
3436 cmdiocbp
->iocb_flag
|=
3438 spin_unlock_irqrestore(
3439 &phba
->hbalock
, iflag
);
3441 if (cmdiocbp
->iocb_flag
&
3442 LPFC_DRIVER_ABORTED
) {
3444 * Clear LPFC_DRIVER_ABORTED
3445 * bit in case it was driver
3449 &phba
->hbalock
, iflag
);
3450 cmdiocbp
->iocb_flag
&=
3451 ~LPFC_DRIVER_ABORTED
;
3452 spin_unlock_irqrestore(
3453 &phba
->hbalock
, iflag
);
3454 cmdiocbp
->iocb
.ulpStatus
=
3455 IOSTAT_LOCAL_REJECT
;
3456 cmdiocbp
->iocb
.un
.ulpWord
[4] =
3457 IOERR_ABORT_REQUESTED
;
3459 * For SLI4, irsiocb contains
3460 * NO_XRI in sli_xritag, it
3461 * shall not affect releasing
3462 * sgl (xri) process.
3464 saveq
->iocb
.ulpStatus
=
3465 IOSTAT_LOCAL_REJECT
;
3466 saveq
->iocb
.un
.ulpWord
[4] =
3469 &phba
->hbalock
, iflag
);
3471 LPFC_DELAY_MEM_FREE
;
3472 spin_unlock_irqrestore(
3473 &phba
->hbalock
, iflag
);
3477 (cmdiocbp
->iocb_cmpl
) (phba
, cmdiocbp
, saveq
);
3479 lpfc_sli_release_iocbq(phba
, cmdiocbp
);
3482 * Unknown initiating command based on the response iotag.
3483 * This could be the case on the ELS ring because of
3486 if (pring
->ringno
!= LPFC_ELS_RING
) {
3488 * Ring <ringno> handler: unexpected completion IoTag
3491 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
3492 "0322 Ring %d handler: "
3493 "unexpected completion IoTag x%x "
3494 "Data: x%x x%x x%x x%x\n",
3496 saveq
->iocb
.ulpIoTag
,
3497 saveq
->iocb
.ulpStatus
,
3498 saveq
->iocb
.un
.ulpWord
[4],
3499 saveq
->iocb
.ulpCommand
,
3500 saveq
->iocb
.ulpContext
);
/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when the
 * put pointer is ahead of the get pointer for a ring. This function signals
 * an error attention condition to the worker thread and the worker
 * thread will transition the HBA to offline state.
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->sli.sli3.numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @t: Context to fetch pointer to address of HBA context object from.
 *
 * This function is invoked by the Error Attention polling timer when the
 * timer times out. It will check the SLI Error Attention register for
 * possible attention events. If so, it will post an Error Attention event
 * and wake up worker thread to process it. Otherwise, it will set up the
 * Error Attention polling timer for the next poll.
 **/
void lpfc_poll_eratt(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;
	uint64_t sli_intr, cnt;

	phba = from_timer(phba, t, eratt_poll);

	/* Here we will also keep track of interrupts per sec of the hba */
	sli_intr = phba->sli.slistat.sli_intr;

	if (phba->sli.slistat.sli_prev_intr > sli_intr)
		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
			sli_intr);
	else
		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);

	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
	do_div(cnt, phba->eratt_poll_interval);
	phba->sli.slistat.sli_ips = cnt;

	phba->sli.slistat.sli_prev_intr = sli_intr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll,
			  jiffies +
			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
}
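
/*
 * Worked example of the wrap-safe rate computation above, using hypothetical
 * values: if the previous snapshot was sli_prev_intr = 0xFFFFFFFFFFFFFFF0 and
 * the counter has wrapped around to sli_intr = 0x10, the first branch
 * computes cnt = ((uint64_t)(-1) - 0xFFFFFFFFFFFFFFF0) + 0x10 = 0x1F, i.e.
 * roughly the number of interrupts taken since the last poll.  do_div() then
 * scales that by eratt_poll_interval so sli_ips holds interrupts per second
 * without requiring a native 64-bit divide on 32-bit hosts.
 */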
3594 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3595 * @phba: Pointer to HBA context object.
3596 * @pring: Pointer to driver SLI ring object.
3597 * @mask: Host attention register mask for this ring.
3599 * This function is called from the interrupt context when there is a ring
3600 * event for the fcp ring. The caller does not hold any lock.
3601 * The function processes each response iocb in the response ring until it
3602 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3603 * LE bit set. The function will call the completion handler of the command iocb
3604 * if the response iocb indicates a completion for a command iocb or it is
3605 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3606 * function if this is an unsolicited iocb.
3607 * This routine presumes LPFC_FCP_RING handling and doesn't bother
3608 * to check it explicitly.
3611 lpfc_sli_handle_fast_ring_event(struct lpfc_hba
*phba
,
3612 struct lpfc_sli_ring
*pring
, uint32_t mask
)
3614 struct lpfc_pgp
*pgp
= &phba
->port_gp
[pring
->ringno
];
3615 IOCB_t
*irsp
= NULL
;
3616 IOCB_t
*entry
= NULL
;
3617 struct lpfc_iocbq
*cmdiocbq
= NULL
;
3618 struct lpfc_iocbq rspiocbq
;
3620 uint32_t portRspPut
, portRspMax
;
3622 lpfc_iocb_type type
;
3623 unsigned long iflag
;
3624 uint32_t rsp_cmpl
= 0;
3626 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3627 pring
->stats
.iocb_event
++;
3630 * The next available response entry should never exceed the maximum
3631 * entries. If it does, treat it as an adapter hardware error.
3633 portRspMax
= pring
->sli
.sli3
.numRiocb
;
3634 portRspPut
= le32_to_cpu(pgp
->rspPutInx
);
3635 if (unlikely(portRspPut
>= portRspMax
)) {
3636 lpfc_sli_rsp_pointers_error(phba
, pring
);
3637 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3640 if (phba
->fcp_ring_in_use
) {
3641 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3644 phba
->fcp_ring_in_use
= 1;
3647 while (pring
->sli
.sli3
.rspidx
!= portRspPut
) {
3649 * Fetch an entry off the ring and copy it into a local data
3650 * structure. The copy involves a byte-swap since the
3651 * network byte order and pci byte orders are different.
3653 entry
= lpfc_resp_iocb(phba
, pring
);
3654 phba
->last_completion_time
= jiffies
;
3656 if (++pring
->sli
.sli3
.rspidx
>= portRspMax
)
3657 pring
->sli
.sli3
.rspidx
= 0;
3659 lpfc_sli_pcimem_bcopy((uint32_t *) entry
,
3660 (uint32_t *) &rspiocbq
.iocb
,
3661 phba
->iocb_rsp_size
);
3662 INIT_LIST_HEAD(&(rspiocbq
.list
));
3663 irsp
= &rspiocbq
.iocb
;
3665 type
= lpfc_sli_iocb_cmd_type(irsp
->ulpCommand
& CMD_IOCB_MASK
);
3666 pring
->stats
.iocb_rsp
++;
3669 if (unlikely(irsp
->ulpStatus
)) {
3671 * If resource errors reported from HBA, reduce
3672 * queuedepths of the SCSI device.
3674 if ((irsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
) &&
3675 ((irsp
->un
.ulpWord
[4] & IOERR_PARAM_MASK
) ==
3676 IOERR_NO_RESOURCES
)) {
3677 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3678 phba
->lpfc_rampdown_queue_depth(phba
);
3679 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3682 /* Rsp ring <ringno> error: IOCB */
3683 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
3684 "0336 Rsp Ring %d error: IOCB Data: "
3685 "x%x x%x x%x x%x x%x x%x x%x x%x\n",
3687 irsp
->un
.ulpWord
[0],
3688 irsp
->un
.ulpWord
[1],
3689 irsp
->un
.ulpWord
[2],
3690 irsp
->un
.ulpWord
[3],
3691 irsp
->un
.ulpWord
[4],
3692 irsp
->un
.ulpWord
[5],
3693 *(uint32_t *)&irsp
->un1
,
3694 *((uint32_t *)&irsp
->un1
+ 1));
3698 case LPFC_ABORT_IOCB
:
3701 * Idle exchange closed via ABTS from port. No iocb
3702 * resources need to be recovered.
3704 if (unlikely(irsp
->ulpCommand
== CMD_XRI_ABORTED_CX
)) {
3705 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
3706 "0333 IOCB cmd 0x%x"
3707 " processed. Skipping"
3713 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3714 cmdiocbq
= lpfc_sli_iocbq_lookup(phba
, pring
,
3716 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3717 if (unlikely(!cmdiocbq
))
3719 if (cmdiocbq
->iocb_flag
& LPFC_DRIVER_ABORTED
)
3720 cmdiocbq
->iocb_flag
&= ~LPFC_DRIVER_ABORTED
;
3721 if (cmdiocbq
->iocb_cmpl
) {
3722 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3723 (cmdiocbq
->iocb_cmpl
)(phba
, cmdiocbq
,
3725 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3728 case LPFC_UNSOL_IOCB
:
3729 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3730 lpfc_sli_process_unsol_iocb(phba
, pring
, &rspiocbq
);
3731 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3734 if (irsp
->ulpCommand
== CMD_ADAPTER_MSG
) {
3735 char adaptermsg
[LPFC_MAX_ADPTMSG
];
3736 memset(adaptermsg
, 0, LPFC_MAX_ADPTMSG
);
3737 memcpy(&adaptermsg
[0], (uint8_t *) irsp
,
3739 dev_warn(&((phba
->pcidev
)->dev
),
3741 phba
->brd_no
, adaptermsg
);
3743 /* Unknown IOCB command */
3744 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
3745 "0334 Unknown IOCB command "
3746 "Data: x%x, x%x x%x x%x x%x\n",
3747 type
, irsp
->ulpCommand
,
3756 * The response IOCB has been processed. Update the ring
3757 * pointer in SLIM. If the port response put pointer has not
3758 * been updated, sync the pgp->rspPutInx and fetch the new port
3759 * response put pointer.
3761 writel(pring
->sli
.sli3
.rspidx
,
3762 &phba
->host_gp
[pring
->ringno
].rspGetInx
);
3764 if (pring
->sli
.sli3
.rspidx
== portRspPut
)
3765 portRspPut
= le32_to_cpu(pgp
->rspPutInx
);
3768 if ((rsp_cmpl
> 0) && (mask
& HA_R0RE_REQ
)) {
3769 pring
->stats
.iocb_rsp_full
++;
3770 status
= ((CA_R0ATT
| CA_R0RE_RSP
) << (pring
->ringno
* 4));
3771 writel(status
, phba
->CAregaddr
);
3772 readl(phba
->CAregaddr
);
3774 if ((mask
& HA_R0CE_RSP
) && (pring
->flag
& LPFC_CALL_RING_AVAILABLE
)) {
3775 pring
->flag
&= ~LPFC_CALL_RING_AVAILABLE
;
3776 pring
->stats
.iocb_cmd_empty
++;
3778 /* Force update of the local copy of cmdGetInx */
3779 pring
->sli
.sli3
.local_getidx
= le32_to_cpu(pgp
->cmdGetInx
);
3780 lpfc_sli_resume_iocb(phba
, pring
);
3782 if ((pring
->lpfc_sli_cmd_available
))
3783 (pring
->lpfc_sli_cmd_available
) (phba
, pring
);
3787 phba
->fcp_ring_in_use
= 0;
3788 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;
3829 /* Now, determine whether the list is completed for processing */
3830 irsp
= &rspiocbp
->iocb
;
3833 * By default, the driver expects to free all resources
3834 * associated with this iocb completion.
3837 saveq
= list_get_first(&pring
->iocb_continueq
,
3838 struct lpfc_iocbq
, list
);
3839 irsp
= &(saveq
->iocb
);
3840 list_del_init(&pring
->iocb_continueq
);
3841 pring
->iocb_continueq_cnt
= 0;
3843 pring
->stats
.iocb_rsp
++;
3846 * If resource errors reported from HBA, reduce
3847 * queuedepths of the SCSI device.
3849 if ((irsp
->ulpStatus
== IOSTAT_LOCAL_REJECT
) &&
3850 ((irsp
->un
.ulpWord
[4] & IOERR_PARAM_MASK
) ==
3851 IOERR_NO_RESOURCES
)) {
3852 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3853 phba
->lpfc_rampdown_queue_depth(phba
);
3854 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3857 if (irsp
->ulpStatus
) {
3858 /* Rsp ring <ringno> error: IOCB */
3859 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
3860 "0328 Rsp Ring %d error: "
3865 "x%x x%x x%x x%x\n",
3867 irsp
->un
.ulpWord
[0],
3868 irsp
->un
.ulpWord
[1],
3869 irsp
->un
.ulpWord
[2],
3870 irsp
->un
.ulpWord
[3],
3871 irsp
->un
.ulpWord
[4],
3872 irsp
->un
.ulpWord
[5],
3873 *(((uint32_t *) irsp
) + 6),
3874 *(((uint32_t *) irsp
) + 7),
3875 *(((uint32_t *) irsp
) + 8),
3876 *(((uint32_t *) irsp
) + 9),
3877 *(((uint32_t *) irsp
) + 10),
3878 *(((uint32_t *) irsp
) + 11),
3879 *(((uint32_t *) irsp
) + 12),
3880 *(((uint32_t *) irsp
) + 13),
3881 *(((uint32_t *) irsp
) + 14),
3882 *(((uint32_t *) irsp
) + 15));
3886 * Fetch the IOCB command type and call the correct completion
3887 * routine. Solicited and Unsolicited IOCBs on the ELS ring
3888 * get freed back to the lpfc_iocb_list by the discovery
3891 iocb_cmd_type
= irsp
->ulpCommand
& CMD_IOCB_MASK
;
3892 type
= lpfc_sli_iocb_cmd_type(iocb_cmd_type
);
3895 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3896 rc
= lpfc_sli_process_sol_iocb(phba
, pring
, saveq
);
3897 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3900 case LPFC_UNSOL_IOCB
:
3901 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3902 rc
= lpfc_sli_process_unsol_iocb(phba
, pring
, saveq
);
3903 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3908 case LPFC_ABORT_IOCB
:
3910 if (irsp
->ulpCommand
!= CMD_XRI_ABORTED_CX
) {
3911 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
3912 cmdiocbp
= lpfc_sli_iocbq_lookup(phba
, pring
,
3914 spin_lock_irqsave(&phba
->hbalock
, iflag
);
3917 /* Call the specified completion routine */
3918 if (cmdiocbp
->iocb_cmpl
) {
3919 spin_unlock_irqrestore(&phba
->hbalock
,
3921 (cmdiocbp
->iocb_cmpl
)(phba
, cmdiocbp
,
3923 spin_lock_irqsave(&phba
->hbalock
,
3926 __lpfc_sli_release_iocbq(phba
,
3931 case LPFC_UNKNOWN_IOCB
:
3932 if (irsp
->ulpCommand
== CMD_ADAPTER_MSG
) {
3933 char adaptermsg
[LPFC_MAX_ADPTMSG
];
3934 memset(adaptermsg
, 0, LPFC_MAX_ADPTMSG
);
3935 memcpy(&adaptermsg
[0], (uint8_t *)irsp
,
3937 dev_warn(&((phba
->pcidev
)->dev
),
3939 phba
->brd_no
, adaptermsg
);
3941 /* Unknown IOCB command */
3942 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
3943 "0335 Unknown IOCB "
3944 "command Data: x%x "
3955 list_for_each_entry_safe(rspiocbp
, next_iocb
,
3956 &saveq
->list
, list
) {
3957 list_del_init(&rspiocbp
->list
);
3958 __lpfc_sli_release_iocbq(phba
, rspiocbp
);
3960 __lpfc_sli_release_iocbq(phba
, saveq
);
3964 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine wraps the actual slow_ring event process routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
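
/*
 * Note: the wrapper above dispatches through a per-HBA jump table.  The
 * sketch below is illustrative only and is not part of the driver; the
 * real assignments are made in the driver's API table setup.  The field
 * and handler names match the wrappers in this file, the helper name is
 * made up for the example.
 */
#if 0
static void lpfc_example_install_slow_ring_handler(struct lpfc_hba *phba)
{
	/* SLI3 ports poll the response ring; SLI4 ports drain the
	 * slow-path worker queue instead.
	 */
	if (phba->sli_rev < LPFC_SLI_REV4)
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
	else
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
}
#endif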
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a ring event
 * for non-fcp rings. The caller does not hold any lock. The function will
 * remove each response iocb in the response ring and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
3999 struct lpfc_pgp
*pgp
;
4001 IOCB_t
*irsp
= NULL
;
4002 struct lpfc_iocbq
*rspiocbp
= NULL
;
4003 uint32_t portRspPut
, portRspMax
;
4004 unsigned long iflag
;
4007 pgp
= &phba
->port_gp
[pring
->ringno
];
4008 spin_lock_irqsave(&phba
->hbalock
, iflag
);
4009 pring
->stats
.iocb_event
++;
4012 * The next available response entry should never exceed the maximum
4013 * entries. If it does, treat it as an adapter hardware error.
4015 portRspMax
= pring
->sli
.sli3
.numRiocb
;
4016 portRspPut
= le32_to_cpu(pgp
->rspPutInx
);
4017 if (portRspPut
>= portRspMax
) {
4019 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4020 * rsp ring <portRspMax>
4022 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
4023 "0303 Ring %d handler: portRspPut %d "
4024 "is bigger than rsp ring %d\n",
4025 pring
->ringno
, portRspPut
, portRspMax
);
4027 phba
->link_state
= LPFC_HBA_ERROR
;
4028 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
4030 phba
->work_hs
= HS_FFER3
;
4031 lpfc_handle_eratt(phba
);
4037 while (pring
->sli
.sli3
.rspidx
!= portRspPut
) {
4039 * Build a completion list and call the appropriate handler.
4040 * The process is to get the next available response iocb, get
4041 * a free iocb from the list, copy the response data into the
4042 * free iocb, insert to the continuation list, and update the
4043 * next response index to slim. This process makes response
4044 * iocb's in the ring available to DMA as fast as possible but
4045 * pays a penalty for a copy operation. Since the iocb is
4046 * only 32 bytes, this penalty is considered small relative to
4047 * the PCI reads for register values and a slim write. When
4048 * the ulpLe field is set, the entire Command has been
4051 entry
= lpfc_resp_iocb(phba
, pring
);
4053 phba
->last_completion_time
= jiffies
;
4054 rspiocbp
= __lpfc_sli_get_iocbq(phba
);
4055 if (rspiocbp
== NULL
) {
4056 printk(KERN_ERR
"%s: out of buffers! Failing "
4057 "completion.\n", __func__
);
4061 lpfc_sli_pcimem_bcopy(entry
, &rspiocbp
->iocb
,
4062 phba
->iocb_rsp_size
);
4063 irsp
= &rspiocbp
->iocb
;
4065 if (++pring
->sli
.sli3
.rspidx
>= portRspMax
)
4066 pring
->sli
.sli3
.rspidx
= 0;
4068 if (pring
->ringno
== LPFC_ELS_RING
) {
4069 lpfc_debugfs_slow_ring_trc(phba
,
4070 "IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
4071 *(((uint32_t *) irsp
) + 4),
4072 *(((uint32_t *) irsp
) + 6),
4073 *(((uint32_t *) irsp
) + 7));
4076 writel(pring
->sli
.sli3
.rspidx
,
4077 &phba
->host_gp
[pring
->ringno
].rspGetInx
);
4079 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
4080 /* Handle the response IOCB */
4081 rspiocbp
= lpfc_sli_sp_handle_rspiocb(phba
, pring
, rspiocbp
);
4082 spin_lock_irqsave(&phba
->hbalock
, iflag
);
		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
4089 if (pring
->sli
.sli3
.rspidx
== portRspPut
) {
4090 portRspPut
= le32_to_cpu(pgp
->rspPutInx
);
4092 } /* while (pring->sli.sli3.rspidx != portRspPut) */
4094 if ((rspiocbp
!= NULL
) && (mask
& HA_R0RE_REQ
)) {
4095 /* At least one response entry has been freed */
4096 pring
->stats
.iocb_rsp_full
++;
4097 /* SET RxRE_RSP in Chip Att register */
4098 status
= ((CA_R0ATT
| CA_R0RE_RSP
) << (pring
->ringno
* 4));
4099 writel(status
, phba
->CAregaddr
);
4100 readl(phba
->CAregaddr
); /* flush */
4102 if ((mask
& HA_R0CE_RSP
) && (pring
->flag
& LPFC_CALL_RING_AVAILABLE
)) {
4103 pring
->flag
&= ~LPFC_CALL_RING_AVAILABLE
;
4104 pring
->stats
.iocb_cmd_empty
++;
4106 /* Force update of the local copy of cmdGetInx */
4107 pring
->sli
.sli3
.local_getidx
= le32_to_cpu(pgp
->cmdGetInx
);
4108 lpfc_sli_resume_iocb(phba
, pring
);
4110 if ((pring
->lpfc_sli_cmd_available
))
4111 (pring
->lpfc_sli_cmd_available
) (phba
, pring
);
4115 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function will remove each
 * response iocb from the response worker queue and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
4132 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba
*phba
,
4133 struct lpfc_sli_ring
*pring
, uint32_t mask
)
4135 struct lpfc_iocbq
*irspiocbq
;
4136 struct hbq_dmabuf
*dmabuf
;
4137 struct lpfc_cq_event
*cq_event
;
4138 unsigned long iflag
;
4141 spin_lock_irqsave(&phba
->hbalock
, iflag
);
4142 phba
->hba_flag
&= ~HBA_SP_QUEUE_EVT
;
4143 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
4144 while (!list_empty(&phba
->sli4_hba
.sp_queue_event
)) {
4145 /* Get the response iocb from the head of work queue */
4146 spin_lock_irqsave(&phba
->hbalock
, iflag
);
4147 list_remove_head(&phba
->sli4_hba
.sp_queue_event
,
4148 cq_event
, struct lpfc_cq_event
, list
);
4149 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
4151 switch (bf_get(lpfc_wcqe_c_code
, &cq_event
->cqe
.wcqe_cmpl
)) {
4152 case CQE_CODE_COMPL_WQE
:
4153 irspiocbq
= container_of(cq_event
, struct lpfc_iocbq
,
4155 /* Translate ELS WCQE to response IOCBQ */
4156 irspiocbq
= lpfc_sli4_els_wcqe_to_rspiocbq(phba
,
4159 lpfc_sli_sp_handle_rspiocb(phba
, pring
,
4163 case CQE_CODE_RECEIVE
:
4164 case CQE_CODE_RECEIVE_V1
:
4165 dmabuf
= container_of(cq_event
, struct hbq_dmabuf
,
4167 lpfc_sli4_handle_received_buffer(phba
, dmabuf
);
4174 /* Limit the number of events to 64 to avoid soft lockups */
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
4191 lpfc_sli_abort_iocb_ring(struct lpfc_hba
*phba
, struct lpfc_sli_ring
*pring
)
4193 LIST_HEAD(completions
);
4194 struct lpfc_iocbq
*iocb
, *next_iocb
;
4196 if (pring
->ringno
== LPFC_ELS_RING
) {
4197 lpfc_fabric_abort_hba(phba
);
4200 /* Error everything on txq and txcmplq
4203 if (phba
->sli_rev
>= LPFC_SLI_REV4
) {
4204 spin_lock_irq(&pring
->ring_lock
);
4205 list_splice_init(&pring
->txq
, &completions
);
4207 spin_unlock_irq(&pring
->ring_lock
);
4209 spin_lock_irq(&phba
->hbalock
);
4210 /* Next issue ABTS for everything on the txcmplq */
4211 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txcmplq
, list
)
4212 lpfc_sli_issue_abort_iotag(phba
, pring
, iocb
, NULL
);
4213 spin_unlock_irq(&phba
->hbalock
);
4215 spin_lock_irq(&phba
->hbalock
);
4216 list_splice_init(&pring
->txq
, &completions
);
4219 /* Next issue ABTS for everything on the txcmplq */
4220 list_for_each_entry_safe(iocb
, next_iocb
, &pring
->txcmplq
, list
)
4221 lpfc_sli_issue_abort_iotag(phba
, pring
, iocb
, NULL
);
4222 spin_unlock_irq(&phba
->hbalock
);
4225 /* Cancel all the IOCBs from the completions list */
4226 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
/**
 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
 * @phba: Pointer to HBA context object.
 *
 * This function aborts all iocbs in FCP rings and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
4240 lpfc_sli_abort_fcp_rings(struct lpfc_hba
*phba
)
4242 struct lpfc_sli
*psli
= &phba
->sli
;
4243 struct lpfc_sli_ring
*pring
;
4246 /* Look on all the FCP Rings for the iotag */
4247 if (phba
->sli_rev
>= LPFC_SLI_REV4
) {
4248 for (i
= 0; i
< phba
->cfg_hdw_queue
; i
++) {
4249 pring
= phba
->sli4_hba
.hdwq
[i
].io_wq
->pring
;
4250 lpfc_sli_abort_iocb_ring(phba
, pring
);
4253 pring
= &psli
->sli3_ring
[LPFC_FCP_RING
];
4254 lpfc_sli_abort_iocb_ring(phba
, pring
);
/**
 * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all iocbs in the IO ring and frees all the iocb
 * objects in txq and txcmplq. This function will not issue abort iocbs
 * for the iocb commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
 * slot has been permanently disabled.
 **/
4269 lpfc_sli_flush_io_rings(struct lpfc_hba
*phba
)
4273 struct lpfc_sli
*psli
= &phba
->sli
;
4274 struct lpfc_sli_ring
*pring
;
4276 struct lpfc_iocbq
*piocb
, *next_iocb
;
4278 spin_lock_irq(&phba
->hbalock
);
4279 if (phba
->hba_flag
& HBA_IOQ_FLUSH
||
4280 !phba
->sli4_hba
.hdwq
) {
4281 spin_unlock_irq(&phba
->hbalock
);
4284 /* Indicate the I/O queues are flushed */
4285 phba
->hba_flag
|= HBA_IOQ_FLUSH
;
4286 spin_unlock_irq(&phba
->hbalock
);
4288 /* Look on all the FCP Rings for the iotag */
4289 if (phba
->sli_rev
>= LPFC_SLI_REV4
) {
4290 for (i
= 0; i
< phba
->cfg_hdw_queue
; i
++) {
4291 pring
= phba
->sli4_hba
.hdwq
[i
].io_wq
->pring
;
4293 spin_lock_irq(&pring
->ring_lock
);
4294 /* Retrieve everything on txq */
4295 list_splice_init(&pring
->txq
, &txq
);
4296 list_for_each_entry_safe(piocb
, next_iocb
,
4297 &pring
->txcmplq
, list
)
4298 piocb
->iocb_flag
&= ~LPFC_IO_ON_TXCMPLQ
;
4299 /* Retrieve everything on the txcmplq */
4300 list_splice_init(&pring
->txcmplq
, &txcmplq
);
4302 pring
->txcmplq_cnt
= 0;
4303 spin_unlock_irq(&pring
->ring_lock
);
4306 lpfc_sli_cancel_iocbs(phba
, &txq
,
4307 IOSTAT_LOCAL_REJECT
,
4309 /* Flush the txcmpq */
4310 lpfc_sli_cancel_iocbs(phba
, &txcmplq
,
4311 IOSTAT_LOCAL_REJECT
,
4315 pring
= &psli
->sli3_ring
[LPFC_FCP_RING
];
4317 spin_lock_irq(&phba
->hbalock
);
4318 /* Retrieve everything on txq */
4319 list_splice_init(&pring
->txq
, &txq
);
4320 list_for_each_entry_safe(piocb
, next_iocb
,
4321 &pring
->txcmplq
, list
)
4322 piocb
->iocb_flag
&= ~LPFC_IO_ON_TXCMPLQ
;
4323 /* Retrieve everything on the txcmplq */
4324 list_splice_init(&pring
->txcmplq
, &txcmplq
);
4326 pring
->txcmplq_cnt
= 0;
4327 spin_unlock_irq(&phba
->hbalock
);
4330 lpfc_sli_cancel_iocbs(phba
, &txq
, IOSTAT_LOCAL_REJECT
,
4332 /* Flush the txcmpq */
4333 lpfc_sli_cancel_iocbs(phba
, &txcmplq
, IOSTAT_LOCAL_REJECT
,
/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares
 * with the provided bit mask to check if the HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart, otherwise
 * it returns zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status, i = 0;
	int retval = 0;
4358 /* Read the HBA Host Status Register */
4359 if (lpfc_readl(phba
->HSregaddr
, &status
))
	/*
	 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset the board
	 * and check every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
4368 while (((status
& mask
) != mask
) &&
4369 !(status
& HS_FFERM
) &&
4381 phba
->pport
->port_state
= LPFC_VPORT_UNKNOWN
;
4382 lpfc_sli_brdrestart(phba
);
4384 /* Read the HBA Host Status Register */
4385 if (lpfc_readl(phba
->HSregaddr
, &status
)) {
4391 /* Check to see if any errors occurred during init */
4392 if ((status
& HS_FFERM
) || (i
>= 20)) {
4393 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
4394 "2751 Adapter failed to restart, "
4395 "status reg x%x, FW Data: A8 x%x AC x%x\n",
4397 readl(phba
->MBslimaddr
+ 0xa8),
4398 readl(phba
->MBslimaddr
+ 0xac));
4399 phba
->link_state
= LPFC_HBA_ERROR
;
/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function checks the host status register to check if the HBA is
 * ready. This function will wait in a loop for the HBA to become ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to become
 * ready; otherwise it returns zero.
 **/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = lpfc_sli4_post_status_check(phba);

	if (status) {
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		status = lpfc_sli4_post_status_check(phba);
	}

	/* Check to see if any errors occurred during init */
	if (status) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	} else
		phba->sli4_hba.intr_enable = 0;

	return retval;
}
/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
 * from the API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	return phba->lpfc_sli_brdready(phba, mask);
}
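
/*
 * Illustrative usage sketch (not part of the driver): how a caller might
 * poll for board readiness through the wrapper above after a restart.
 * The HS_FFRDY | HS_MBRDY mask mirrors the bits checked by
 * lpfc_sli_chipset_init() below; the helper name and the -EIO mapping are
 * assumptions for the example.
 */
#if 0
static int lpfc_example_wait_board_ready(struct lpfc_hba *phba)
{
	/* lpfc_sli_brdready() returns 1 when the board failed to come
	 * ready and 0 on success.
	 */
	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
		return -EIO;
	return 0;
}
#endif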
4456 #define BARRIER_TEST_PATTERN (0xdeadbeef)
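
/*
 * Illustrative sketch (not part of the driver): the handshake that
 * lpfc_reset_barrier() below performs with BARRIER_TEST_PATTERN.  The
 * host writes the pattern into SLIM and the chip acknowledges by writing
 * back its one's complement.  The helper name is made up; the register
 * access style follows the surrounding code.
 */
#if 0
static bool lpfc_example_barrier_acked(struct lpfc_hba *phba)
{
	uint32_t resp_data;

	/* The response slot is the second word of SLIM. */
	if (lpfc_readl((uint32_t __iomem *)phba->MBslimaddr + 1, &resp_data))
		return false;
	return resp_data == ~(BARRIER_TEST_PATTERN);
}
#endif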
/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting an HBA. This function is called
 * with hbalock held and requests HBA to quiesce DMAs before a reset.
 **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
4467 uint32_t __iomem
*resp_buf
;
4468 uint32_t __iomem
*mbox_buf
;
4469 volatile uint32_t mbox
;
4470 uint32_t hc_copy
, ha_copy
, resp_data
;
4474 lockdep_assert_held(&phba
->hbalock
);
4476 pci_read_config_byte(phba
->pcidev
, PCI_HEADER_TYPE
, &hdrtype
);
4477 if (hdrtype
!= 0x80 ||
4478 (FC_JEDEC_ID(phba
->vpd
.rev
.biuRev
) != HELIOS_JEDEC_ID
&&
4479 FC_JEDEC_ID(phba
->vpd
.rev
.biuRev
) != THOR_JEDEC_ID
))
4483 * Tell the other part of the chip to suspend temporarily all
4486 resp_buf
= phba
->MBslimaddr
;
4488 /* Disable the error attention */
4489 if (lpfc_readl(phba
->HCregaddr
, &hc_copy
))
4491 writel((hc_copy
& ~HC_ERINT_ENA
), phba
->HCregaddr
);
4492 readl(phba
->HCregaddr
); /* flush */
4493 phba
->link_flag
|= LS_IGNORE_ERATT
;
4495 if (lpfc_readl(phba
->HAregaddr
, &ha_copy
))
4497 if (ha_copy
& HA_ERATT
) {
4498 /* Clear Chip error bit */
4499 writel(HA_ERATT
, phba
->HAregaddr
);
4500 phba
->pport
->stopped
= 1;
4504 ((MAILBOX_t
*)&mbox
)->mbxCommand
= MBX_KILL_BOARD
;
4505 ((MAILBOX_t
*)&mbox
)->mbxOwner
= OWN_CHIP
;
4507 writel(BARRIER_TEST_PATTERN
, (resp_buf
+ 1));
4508 mbox_buf
= phba
->MBslimaddr
;
4509 writel(mbox
, mbox_buf
);
4511 for (i
= 0; i
< 50; i
++) {
4512 if (lpfc_readl((resp_buf
+ 1), &resp_data
))
4514 if (resp_data
!= ~(BARRIER_TEST_PATTERN
))
4520 if (lpfc_readl((resp_buf
+ 1), &resp_data
))
4522 if (resp_data
!= ~(BARRIER_TEST_PATTERN
)) {
4523 if (phba
->sli
.sli_flag
& LPFC_SLI_ACTIVE
||
4524 phba
->pport
->stopped
)
4530 ((MAILBOX_t
*)&mbox
)->mbxOwner
= OWN_HOST
;
4532 for (i
= 0; i
< 500; i
++) {
4533 if (lpfc_readl(resp_buf
, &resp_data
))
4535 if (resp_data
!= mbox
)
4544 if (lpfc_readl(phba
->HAregaddr
, &ha_copy
))
4546 if (!(ha_copy
& HA_ERATT
))
4552 if (readl(phba
->HAregaddr
) & HA_ERATT
) {
4553 writel(HA_ERATT
, phba
->HAregaddr
);
4554 phba
->pport
->stopped
= 1;
4558 phba
->link_flag
&= ~LS_IGNORE_ERATT
;
4559 writel(hc_copy
, phba
->HCregaddr
);
4560 readl(phba
->HCregaddr
); /* flush */
/**
 * lpfc_sli_brdkill - Issue a kill_board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * This function issues a kill_board mailbox command and waits for
 * the error attention interrupt. This function is called for stopping
 * the firmware processing. The caller is not required to hold any
 * locks. This function calls lpfc_hba_down_post function to free
 * any pending commands after the kill. The function will return 1 when it
 * fails to kill the board, else it will return 0.
 **/
4575 lpfc_sli_brdkill(struct lpfc_hba
*phba
)
4577 struct lpfc_sli
*psli
;
4587 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4588 "0329 Kill HBA Data: x%x x%x\n",
4589 phba
->pport
->port_state
, psli
->sli_flag
);
4591 pmb
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
4595 /* Disable the error attention */
4596 spin_lock_irq(&phba
->hbalock
);
4597 if (lpfc_readl(phba
->HCregaddr
, &status
)) {
4598 spin_unlock_irq(&phba
->hbalock
);
4599 mempool_free(pmb
, phba
->mbox_mem_pool
);
4602 status
&= ~HC_ERINT_ENA
;
4603 writel(status
, phba
->HCregaddr
);
4604 readl(phba
->HCregaddr
); /* flush */
4605 phba
->link_flag
|= LS_IGNORE_ERATT
;
4606 spin_unlock_irq(&phba
->hbalock
);
4608 lpfc_kill_board(phba
, pmb
);
4609 pmb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
4610 retval
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_NOWAIT
);
4612 if (retval
!= MBX_SUCCESS
) {
4613 if (retval
!= MBX_BUSY
)
4614 mempool_free(pmb
, phba
->mbox_mem_pool
);
4615 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
4616 "2752 KILL_BOARD command failed retval %d\n",
4618 spin_lock_irq(&phba
->hbalock
);
4619 phba
->link_flag
&= ~LS_IGNORE_ERATT
;
4620 spin_unlock_irq(&phba
->hbalock
);
4624 spin_lock_irq(&phba
->hbalock
);
4625 psli
->sli_flag
&= ~LPFC_SLI_ACTIVE
;
4626 spin_unlock_irq(&phba
->hbalock
);
4628 mempool_free(pmb
, phba
->mbox_mem_pool
);
4630 /* There is no completion for a KILL_BOARD mbox cmd. Check for an error
4631 * attention every 100ms for 3 seconds. If we don't get ERATT after
4632 * 3 seconds we still set HBA_ERROR state because the status of the
4633 * board is now undefined.
4635 if (lpfc_readl(phba
->HAregaddr
, &ha_copy
))
4637 while ((i
++ < 30) && !(ha_copy
& HA_ERATT
)) {
4639 if (lpfc_readl(phba
->HAregaddr
, &ha_copy
))
4643 del_timer_sync(&psli
->mbox_tmo
);
4644 if (ha_copy
& HA_ERATT
) {
4645 writel(HA_ERATT
, phba
->HAregaddr
);
4646 phba
->pport
->stopped
= 1;
4648 spin_lock_irq(&phba
->hbalock
);
4649 psli
->sli_flag
&= ~LPFC_SLI_MBOX_ACTIVE
;
4650 psli
->mbox_active
= NULL
;
4651 phba
->link_flag
&= ~LS_IGNORE_ERATT
;
4652 spin_unlock_irq(&phba
->hbalock
);
4654 lpfc_hba_down_post(phba
);
4655 phba
->link_state
= LPFC_HBA_ERROR
;
4657 return ha_copy
& HA_ERATT
? 0 : 1;
/**
 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets the HBA by writing HC_INITFF to the control
 * register. After the HBA resets, this function resets all the iocb ring
 * indices. This function disables PCI layer parity checking during
 * the reset.
 * This function returns 0 always.
 * The caller is not required to hold any locks.
 **/
4672 lpfc_sli_brdreset(struct lpfc_hba
*phba
)
4674 struct lpfc_sli
*psli
;
4675 struct lpfc_sli_ring
*pring
;
4682 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4683 "0325 Reset HBA Data: x%x x%x\n",
4684 (phba
->pport
) ? phba
->pport
->port_state
: 0,
4687 /* perform board reset */
4688 phba
->fc_eventTag
= 0;
4689 phba
->link_events
= 0;
4691 phba
->pport
->fc_myDID
= 0;
4692 phba
->pport
->fc_prevDID
= 0;
4695 /* Turn off parity checking and serr during the physical reset */
4696 if (pci_read_config_word(phba
->pcidev
, PCI_COMMAND
, &cfg_value
))
4699 pci_write_config_word(phba
->pcidev
, PCI_COMMAND
,
4701 ~(PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
)));
4703 psli
->sli_flag
&= ~(LPFC_SLI_ACTIVE
| LPFC_PROCESS_LA
);
4705 /* Now toggle INITFF bit in the Host Control Register */
4706 writel(HC_INITFF
, phba
->HCregaddr
);
4708 readl(phba
->HCregaddr
); /* flush */
4709 writel(0, phba
->HCregaddr
);
4710 readl(phba
->HCregaddr
); /* flush */
4712 /* Restore PCI cmd register */
4713 pci_write_config_word(phba
->pcidev
, PCI_COMMAND
, cfg_value
);
4715 /* Initialize relevant SLI info */
4716 for (i
= 0; i
< psli
->num_rings
; i
++) {
4717 pring
= &psli
->sli3_ring
[i
];
4719 pring
->sli
.sli3
.rspidx
= 0;
4720 pring
->sli
.sli3
.next_cmdidx
= 0;
4721 pring
->sli
.sli3
.local_getidx
= 0;
4722 pring
->sli
.sli3
.cmdidx
= 0;
4723 pring
->missbufcnt
= 0;
4726 phba
->link_state
= LPFC_WARM_START
;
/**
 * lpfc_sli4_brdreset - Reset a sli-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets a SLI4 HBA. This function disables PCI layer parity
 * checking while it resets the device. The caller is not required to hold
 * any locks.
 *
 * This function returns 0 on success else returns negative error code.
 **/
4741 lpfc_sli4_brdreset(struct lpfc_hba
*phba
)
4743 struct lpfc_sli
*psli
= &phba
->sli
;
4748 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4749 "0295 Reset HBA Data: x%x x%x x%x\n",
4750 phba
->pport
->port_state
, psli
->sli_flag
,
4753 /* perform board reset */
4754 phba
->fc_eventTag
= 0;
4755 phba
->link_events
= 0;
4756 phba
->pport
->fc_myDID
= 0;
4757 phba
->pport
->fc_prevDID
= 0;
4759 spin_lock_irq(&phba
->hbalock
);
4760 psli
->sli_flag
&= ~(LPFC_PROCESS_LA
);
4761 phba
->fcf
.fcf_flag
= 0;
4762 spin_unlock_irq(&phba
->hbalock
);
4764 /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
4765 if (phba
->hba_flag
& HBA_FW_DUMP_OP
) {
4766 phba
->hba_flag
&= ~HBA_FW_DUMP_OP
;
4770 /* Now physically reset the device */
4771 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
4772 "0389 Performing PCI function reset!\n");
4774 /* Turn off parity checking and serr during the physical reset */
4775 if (pci_read_config_word(phba
->pcidev
, PCI_COMMAND
, &cfg_value
)) {
4776 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
4777 "3205 PCI read Config failed\n");
4781 pci_write_config_word(phba
->pcidev
, PCI_COMMAND
, (cfg_value
&
4782 ~(PCI_COMMAND_PARITY
| PCI_COMMAND_SERR
)));
4784 /* Perform FCoE PCI function reset before freeing queue memory */
4785 rc
= lpfc_pci_function_reset(phba
);
4787 /* Restore PCI cmd register */
4788 pci_write_config_word(phba
->pcidev
, PCI_COMMAND
, cfg_value
);
/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to
 * restart the HBA. The caller is not required to hold any lock.
 * This function writes MBX_RESTART mailbox command to the SLIM and
 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
 * function to free any pending commands. The function enables
 * POST only during the first initialization. The function returns zero.
 * The function does not guarantee completion of MBX_RESTART mailbox
 * command before the return of this function.
 **/
4807 lpfc_sli_brdrestart_s3(struct lpfc_hba
*phba
)
4810 struct lpfc_sli
*psli
;
4811 volatile uint32_t word0
;
4812 void __iomem
*to_slim
;
4813 uint32_t hba_aer_enabled
;
4815 spin_lock_irq(&phba
->hbalock
);
4817 /* Take PCIe device Advanced Error Reporting (AER) state */
4818 hba_aer_enabled
= phba
->hba_flag
& HBA_AER_ENABLED
;
4823 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4824 "0337 Restart HBA Data: x%x x%x\n",
4825 (phba
->pport
) ? phba
->pport
->port_state
: 0,
4829 mb
= (MAILBOX_t
*) &word0
;
4830 mb
->mbxCommand
= MBX_RESTART
;
4833 lpfc_reset_barrier(phba
);
4835 to_slim
= phba
->MBslimaddr
;
4836 writel(*(uint32_t *) mb
, to_slim
);
4837 readl(to_slim
); /* flush */
4839 /* Only skip post after fc_ffinit is completed */
4840 if (phba
->pport
&& phba
->pport
->port_state
)
4841 word0
= 1; /* This is really setting up word1 */
4843 word0
= 0; /* This is really setting up word1 */
4844 to_slim
= phba
->MBslimaddr
+ sizeof (uint32_t);
4845 writel(*(uint32_t *) mb
, to_slim
);
4846 readl(to_slim
); /* flush */
4848 lpfc_sli_brdreset(phba
);
4850 phba
->pport
->stopped
= 0;
4851 phba
->link_state
= LPFC_INIT_START
;
4853 spin_unlock_irq(&phba
->hbalock
);
4855 memset(&psli
->lnk_stat_offsets
, 0, sizeof(psli
->lnk_stat_offsets
));
4856 psli
->stats_start
= ktime_get_seconds();
4858 /* Give the INITFF and Post time to settle. */
4861 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4862 if (hba_aer_enabled
)
4863 pci_disable_pcie_error_reporting(phba
->pcidev
);
4865 lpfc_hba_down_post(phba
);
/**
 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * a SLI4 HBA. The caller is not required to hold any lock.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
4880 lpfc_sli_brdrestart_s4(struct lpfc_hba
*phba
)
4882 struct lpfc_sli
*psli
= &phba
->sli
;
4883 uint32_t hba_aer_enabled
;
4887 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
4888 "0296 Restart HBA Data: x%x x%x\n",
4889 phba
->pport
->port_state
, psli
->sli_flag
);
4891 /* Take PCIe device Advanced Error Reporting (AER) state */
4892 hba_aer_enabled
= phba
->hba_flag
& HBA_AER_ENABLED
;
4894 rc
= lpfc_sli4_brdreset(phba
);
4896 phba
->link_state
= LPFC_HBA_ERROR
;
4897 goto hba_down_queue
;
4900 spin_lock_irq(&phba
->hbalock
);
4901 phba
->pport
->stopped
= 0;
4902 phba
->link_state
= LPFC_INIT_START
;
4904 spin_unlock_irq(&phba
->hbalock
);
4906 memset(&psli
->lnk_stat_offsets
, 0, sizeof(psli
->lnk_stat_offsets
));
4907 psli
->stats_start
= ktime_get_seconds();
4909 /* Reset HBA AER if it was enabled, note hba_flag was reset above */
4910 if (hba_aer_enabled
)
4911 pci_disable_pcie_error_reporting(phba
->pcidev
);
4914 lpfc_hba_down_post(phba
);
4915 lpfc_sli4_queue_destroy(phba
);
/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}
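
/*
 * Illustrative sketch (not part of the driver): the restart-then-poll
 * sequence used by lpfc_sli_config_port() further down -- mark the port
 * state unknown, restart through the jump table wrapper above, then wait
 * for the chipset to come back with lpfc_sli_chipset_init().  The helper
 * name is made up and error handling is elided.
 */
#if 0
static int lpfc_example_restart_and_wait(struct lpfc_hba *phba)
{
	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
	lpfc_sli_brdrestart(phba);
	return lpfc_sli_chipset_init(phba);
}
#endif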
/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after an HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
 * iterations, the function will restart the HBA again. The function returns
 * zero if the HBA successfully restarted else returns negative error code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;
4948 /* Read the HBA Host Status Register */
4949 if (lpfc_readl(phba
->HSregaddr
, &status
))
4952 /* Check status register to see what current state is */
4954 while ((status
& (HS_FFRDY
| HS_MBRDY
)) != (HS_FFRDY
| HS_MBRDY
)) {
		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries, for a total of
		 * ~60 seconds before resetting the board again and checking
		 * every 1 sec for 50 retries. Up to 60 seconds before the
		 * board is ready is required for Falcon FIPS zeroization to
		 * complete; any board reset in between restarts zeroization
		 * and further delays board readiness.
		 */
4965 /* Adapter failed to init, timeout, status reg
4967 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
4968 "0436 Adapter failed to init, "
4969 "timeout, status reg x%x, "
4970 "FW Data: A8 x%x AC x%x\n", status
,
4971 readl(phba
->MBslimaddr
+ 0xa8),
4972 readl(phba
->MBslimaddr
+ 0xac));
4973 phba
->link_state
= LPFC_HBA_ERROR
;
4977 /* Check to see if any errors occurred during init */
4978 if (status
& HS_FFERM
) {
4979 /* ERROR: During chipset initialization */
4980 /* Adapter failed to init, chipset, status reg
4982 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
4983 "0437 Adapter failed to init, "
4984 "chipset, status reg x%x, "
4985 "FW Data: A8 x%x AC x%x\n", status
,
4986 readl(phba
->MBslimaddr
+ 0xa8),
4987 readl(phba
->MBslimaddr
+ 0xac));
4988 phba
->link_state
= LPFC_HBA_ERROR
;
5001 phba
->pport
->port_state
= LPFC_VPORT_UNKNOWN
;
5002 lpfc_sli_brdrestart(phba
);
5004 /* Read the HBA Host Status Register */
5005 if (lpfc_readl(phba
->HSregaddr
, &status
))
5009 /* Check to see if any errors occurred during init */
5010 if (status
& HS_FFERM
) {
5011 /* ERROR: During chipset initialization */
5012 /* Adapter failed to init, chipset, status reg <status> */
5013 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
5014 "0438 Adapter failed to init, chipset, "
5016 "FW Data: A8 x%x AC x%x\n", status
,
5017 readl(phba
->MBslimaddr
+ 0xa8),
5018 readl(phba
->MBslimaddr
+ 0xac));
5019 phba
->link_state
= LPFC_HBA_ERROR
;
5023 /* Clear all interrupt enable conditions */
5024 writel(0, phba
->HCregaddr
);
5025 readl(phba
->HCregaddr
); /* flush */
5027 /* setup host attn register */
5028 writel(0xffffffff, phba
->HAregaddr
);
5029 readl(phba
->HAregaddr
); /* flush */
/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * This function calculates and returns the number of HBQs required to be
 * configured.
 **/
int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}

/**
 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
 *
 * This function adds the number of hbq entries in every HBQ to get
 * the total number of hbq entries required for the HBA and returns
 * the total count.
 **/
static int
lpfc_sli_hbq_entry_count(void)
{
	int hbq_count = lpfc_sli_hbq_count();
	int count = 0;
	int i;

	for (i = 0; i < hbq_count; ++i)
		count += lpfc_hbq_defs[i]->entry_count;
	return count;
}

/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * This function calculates the amount of memory required for all hbq entries
 * to be configured and returns the total memory required.
 **/
int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
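
/*
 * Illustrative sketch (not part of the driver): the same arithmetic that
 * lpfc_sli_hbq_size() performs, written out for a hypothetical layout.
 * The entry counts and the 16-byte entry size are assumed numbers for the
 * example, not values read from lpfc_hbq_defs.
 */
#if 0
static unsigned long lpfc_example_hbq_bytes(void)
{
	unsigned long els_entries = 256;	/* assumed ELS HBQ entry count */
	unsigned long extra_entries = 16;	/* assumed extra HBQ entry count */
	unsigned long entry_size = 16;		/* assumed sizeof(struct lpfc_hbq_entry) */

	/* total memory = (sum of per-HBQ entry counts) * entry size */
	return (els_entries + extra_entries) * entry_size;
}
#endif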
5077 * lpfc_sli_hbq_setup - configure and initialize HBQs
5078 * @phba: Pointer to HBA context object.
5080 * This function is called during the SLI initialization to configure
5081 * all the HBQs and post buffers to the HBQ. The caller is not
5082 * required to hold any locks. This function will return zero if successful
5083 * else it will return negative error code.
5086 lpfc_sli_hbq_setup(struct lpfc_hba
*phba
)
5088 int hbq_count
= lpfc_sli_hbq_count();
5092 uint32_t hbq_entry_index
;
5094 /* Get a Mailbox buffer to setup mailbox
5095 * commands for HBA initialization
5097 pmb
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5104 /* Initialize the struct lpfc_sli_hbq structure for each hbq */
5105 phba
->link_state
= LPFC_INIT_MBX_CMDS
;
5106 phba
->hbq_in_use
= 1;
5108 hbq_entry_index
= 0;
5109 for (hbqno
= 0; hbqno
< hbq_count
; ++hbqno
) {
5110 phba
->hbqs
[hbqno
].next_hbqPutIdx
= 0;
5111 phba
->hbqs
[hbqno
].hbqPutIdx
= 0;
5112 phba
->hbqs
[hbqno
].local_hbqGetIdx
= 0;
5113 phba
->hbqs
[hbqno
].entry_count
=
5114 lpfc_hbq_defs
[hbqno
]->entry_count
;
5115 lpfc_config_hbq(phba
, hbqno
, lpfc_hbq_defs
[hbqno
],
5116 hbq_entry_index
, pmb
);
5117 hbq_entry_index
+= phba
->hbqs
[hbqno
].entry_count
;
5119 if (lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
) != MBX_SUCCESS
) {
5120 /* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5121 mbxStatus <status>, ring <num> */
5123 lpfc_printf_log(phba
, KERN_ERR
,
5124 LOG_SLI
| LOG_VPORT
,
5125 "1805 Adapter failed to init. "
5126 "Data: x%x x%x x%x\n",
5128 pmbox
->mbxStatus
, hbqno
);
5130 phba
->link_state
= LPFC_HBA_ERROR
;
5131 mempool_free(pmb
, phba
->mbox_mem_pool
);
5135 phba
->hbq_count
= hbq_count
;
5137 mempool_free(pmb
, phba
->mbox_mem_pool
);
5139 /* Initially populate or replenish the HBQs */
5140 for (hbqno
= 0; hbqno
< hbq_count
; ++hbqno
)
5141 lpfc_sli_hbqbuf_init_hbqs(phba
, hbqno
);
5146 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5147 * @phba: Pointer to HBA context object.
5149 * This function is called during the SLI initialization to configure
5150 * all the HBQs and post buffers to the HBQ. The caller is not
5151 * required to hold any locks. This function will return zero if successful
5152 * else it will return negative error code.
5155 lpfc_sli4_rb_setup(struct lpfc_hba
*phba
)
5157 phba
->hbq_in_use
= 1;
5159 * Specific case when the MDS diagnostics is enabled and supported.
5160 * The receive buffer count is truncated to manage the incoming
5163 if (phba
->cfg_enable_mds_diags
&& phba
->mds_diags_support
)
5164 phba
->hbqs
[LPFC_ELS_HBQ
].entry_count
=
5165 lpfc_hbq_defs
[LPFC_ELS_HBQ
]->entry_count
>> 1;
5167 phba
->hbqs
[LPFC_ELS_HBQ
].entry_count
=
5168 lpfc_hbq_defs
[LPFC_ELS_HBQ
]->entry_count
;
5169 phba
->hbq_count
= 1;
5170 lpfc_sli_hbqbuf_init_hbqs(phba
, LPFC_ELS_HBQ
);
5171 /* Initially populate or replenish the HBQs */
/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue the config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by the sli_mode
 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else it returns negative error
 * code.
 **/
5189 lpfc_sli_config_port(struct lpfc_hba
*phba
, int sli_mode
)
5192 uint32_t resetcount
= 0, rc
= 0, done
= 0;
5194 pmb
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5196 phba
->link_state
= LPFC_HBA_ERROR
;
5200 phba
->sli_rev
= sli_mode
;
5201 while (resetcount
< 2 && !done
) {
5202 spin_lock_irq(&phba
->hbalock
);
5203 phba
->sli
.sli_flag
|= LPFC_SLI_MBOX_ACTIVE
;
5204 spin_unlock_irq(&phba
->hbalock
);
5205 phba
->pport
->port_state
= LPFC_VPORT_UNKNOWN
;
5206 lpfc_sli_brdrestart(phba
);
5207 rc
= lpfc_sli_chipset_init(phba
);
5211 spin_lock_irq(&phba
->hbalock
);
5212 phba
->sli
.sli_flag
&= ~LPFC_SLI_MBOX_ACTIVE
;
5213 spin_unlock_irq(&phba
->hbalock
);
5216 /* Call pre CONFIG_PORT mailbox command initialization. A
5217 * value of 0 means the call was successful. Any other
5218 * nonzero value is a failure, but if ERESTART is returned,
5219 * the driver may reset the HBA and try again.
5221 rc
= lpfc_config_port_prep(phba
);
5222 if (rc
== -ERESTART
) {
5223 phba
->link_state
= LPFC_LINK_UNKNOWN
;
5228 phba
->link_state
= LPFC_INIT_MBX_CMDS
;
5229 lpfc_config_port(phba
, pmb
);
5230 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
);
5231 phba
->sli3_options
&= ~(LPFC_SLI3_NPIV_ENABLED
|
5232 LPFC_SLI3_HBQ_ENABLED
|
5233 LPFC_SLI3_CRP_ENABLED
|
5234 LPFC_SLI3_DSS_ENABLED
);
5235 if (rc
!= MBX_SUCCESS
) {
5236 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
5237 "0442 Adapter failed to init, mbxCmd x%x "
5238 "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5239 pmb
->u
.mb
.mbxCommand
, pmb
->u
.mb
.mbxStatus
, 0);
5240 spin_lock_irq(&phba
->hbalock
);
5241 phba
->sli
.sli_flag
&= ~LPFC_SLI_ACTIVE
;
5242 spin_unlock_irq(&phba
->hbalock
);
5245 /* Allow asynchronous mailbox command to go through */
5246 spin_lock_irq(&phba
->hbalock
);
5247 phba
->sli
.sli_flag
&= ~LPFC_SLI_ASYNC_MBX_BLK
;
5248 spin_unlock_irq(&phba
->hbalock
);
5251 if ((pmb
->u
.mb
.un
.varCfgPort
.casabt
== 1) &&
5252 (pmb
->u
.mb
.un
.varCfgPort
.gasabt
== 0))
5253 lpfc_printf_log(phba
, KERN_WARNING
, LOG_INIT
,
5254 "3110 Port did not grant ASABT\n");
5259 goto do_prep_failed
;
5261 if (pmb
->u
.mb
.un
.varCfgPort
.sli_mode
== 3) {
5262 if (!pmb
->u
.mb
.un
.varCfgPort
.cMA
) {
5264 goto do_prep_failed
;
5266 if (phba
->max_vpi
&& pmb
->u
.mb
.un
.varCfgPort
.gmv
) {
5267 phba
->sli3_options
|= LPFC_SLI3_NPIV_ENABLED
;
5268 phba
->max_vpi
= pmb
->u
.mb
.un
.varCfgPort
.max_vpi
;
5269 phba
->max_vports
= (phba
->max_vpi
> phba
->max_vports
) ?
5270 phba
->max_vpi
: phba
->max_vports
;
5274 if (pmb
->u
.mb
.un
.varCfgPort
.gerbm
)
5275 phba
->sli3_options
|= LPFC_SLI3_HBQ_ENABLED
;
5276 if (pmb
->u
.mb
.un
.varCfgPort
.gcrp
)
5277 phba
->sli3_options
|= LPFC_SLI3_CRP_ENABLED
;
5279 phba
->hbq_get
= phba
->mbox
->us
.s3_pgp
.hbq_get
;
5280 phba
->port_gp
= phba
->mbox
->us
.s3_pgp
.port
;
5282 if (phba
->sli3_options
& LPFC_SLI3_BG_ENABLED
) {
5283 if (pmb
->u
.mb
.un
.varCfgPort
.gbg
== 0) {
5284 phba
->cfg_enable_bg
= 0;
5285 phba
->sli3_options
&= ~LPFC_SLI3_BG_ENABLED
;
5286 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
5287 "0443 Adapter did not grant "
5292 phba
->hbq_get
= NULL
;
5293 phba
->port_gp
= phba
->mbox
->us
.s2
.port
;
5297 mempool_free(pmb
, phba
->mbox_mem_pool
);
/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization function. This function
 * is called by the HBA initialization code, HBA reset code and HBA
 * error attention handler code. Caller is not required to hold any
 * locks. This function issues config_port mailbox command to configure
 * the SLI, setup iocb rings and HBQ rings. In the end the function
 * calls the config_port_post function to issue init_link mailbox
 * command and to start the discovery. The function will return zero
 * if successful, else it will return negative error code.
 **/
5316 lpfc_sli_hba_setup(struct lpfc_hba
*phba
)
5322 switch (phba
->cfg_sli_mode
) {
5324 if (phba
->cfg_enable_npiv
) {
5325 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
5326 "1824 NPIV enabled: Override sli_mode "
5327 "parameter (%d) to auto (0).\n",
5328 phba
->cfg_sli_mode
);
5337 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
5338 "1819 Unrecognized sli_mode parameter: %d.\n",
5339 phba
->cfg_sli_mode
);
5343 phba
->fcp_embed_io
= 0; /* SLI4 FC support only */
5345 rc
= lpfc_sli_config_port(phba
, mode
);
5347 if (rc
&& phba
->cfg_sli_mode
== 3)
5348 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
5349 "1820 Unable to select SLI-3. "
5350 "Not supported by adapter.\n");
5351 if (rc
&& mode
!= 2)
5352 rc
= lpfc_sli_config_port(phba
, 2);
5353 else if (rc
&& mode
== 2)
5354 rc
= lpfc_sli_config_port(phba
, 3);
5356 goto lpfc_sli_hba_setup_error
;
5358 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
5359 if (phba
->cfg_aer_support
== 1 && !(phba
->hba_flag
& HBA_AER_ENABLED
)) {
5360 rc
= pci_enable_pcie_error_reporting(phba
->pcidev
);
5362 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
5363 "2709 This device supports "
5364 "Advanced Error Reporting (AER)\n");
5365 spin_lock_irq(&phba
->hbalock
);
5366 phba
->hba_flag
|= HBA_AER_ENABLED
;
5367 spin_unlock_irq(&phba
->hbalock
);
5369 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
5370 "2708 This device does not support "
5371 "Advanced Error Reporting (AER): %d\n",
5373 phba
->cfg_aer_support
= 0;
5377 if (phba
->sli_rev
== 3) {
5378 phba
->iocb_cmd_size
= SLI3_IOCB_CMD_SIZE
;
5379 phba
->iocb_rsp_size
= SLI3_IOCB_RSP_SIZE
;
5381 phba
->iocb_cmd_size
= SLI2_IOCB_CMD_SIZE
;
5382 phba
->iocb_rsp_size
= SLI2_IOCB_RSP_SIZE
;
5383 phba
->sli3_options
= 0;
5386 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
5387 "0444 Firmware in SLI %x mode. Max_vpi %d\n",
5388 phba
->sli_rev
, phba
->max_vpi
);
5389 rc
= lpfc_sli_ring_map(phba
);
5392 goto lpfc_sli_hba_setup_error
;
5394 /* Initialize VPIs. */
5395 if (phba
->sli_rev
== LPFC_SLI_REV3
) {
5397 * The VPI bitmask and physical ID array are allocated
5398 * and initialized once only - at driver load. A port
5399 * reset doesn't need to reinitialize this memory.
5401 if ((phba
->vpi_bmask
== NULL
) && (phba
->vpi_ids
== NULL
)) {
5402 longs
= (phba
->max_vpi
+ BITS_PER_LONG
) / BITS_PER_LONG
;
5403 phba
->vpi_bmask
= kcalloc(longs
,
5404 sizeof(unsigned long),
5406 if (!phba
->vpi_bmask
) {
5408 goto lpfc_sli_hba_setup_error
;
5411 phba
->vpi_ids
= kcalloc(phba
->max_vpi
+ 1,
5414 if (!phba
->vpi_ids
) {
5415 kfree(phba
->vpi_bmask
);
5417 goto lpfc_sli_hba_setup_error
;
5419 for (i
= 0; i
< phba
->max_vpi
; i
++)
5420 phba
->vpi_ids
[i
] = i
;
5425 if (phba
->sli3_options
& LPFC_SLI3_HBQ_ENABLED
) {
5426 rc
= lpfc_sli_hbq_setup(phba
);
5428 goto lpfc_sli_hba_setup_error
;
5430 spin_lock_irq(&phba
->hbalock
);
5431 phba
->sli
.sli_flag
|= LPFC_PROCESS_LA
;
5432 spin_unlock_irq(&phba
->hbalock
);
5434 rc
= lpfc_config_port_post(phba
);
5436 goto lpfc_sli_hba_setup_error
;
5440 lpfc_sli_hba_setup_error
:
5441 phba
->link_state
= LPFC_HBA_ERROR
;
5442 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
5443 "0445 Firmware initialization failed\n");
/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 *
 * This function issues a dump mailbox command to read config region
 * 23, parses the records in the region, and populates the driver
 * data structures with the FCoE parameters.
 **/
5456 lpfc_sli4_read_fcoe_params(struct lpfc_hba
*phba
)
5458 LPFC_MBOXQ_t
*mboxq
;
5459 struct lpfc_dmabuf
*mp
;
5460 struct lpfc_mqe
*mqe
;
5461 uint32_t data_length
;
5464 /* Program the default value of vlan_id and fc_map */
5465 phba
->valid_vlan
= 0;
5466 phba
->fc_map
[0] = LPFC_FCOE_FCF_MAP0
;
5467 phba
->fc_map
[1] = LPFC_FCOE_FCF_MAP1
;
5468 phba
->fc_map
[2] = LPFC_FCOE_FCF_MAP2
;
5470 mboxq
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5474 mqe
= &mboxq
->u
.mqe
;
5475 if (lpfc_sli4_dump_cfg_rg23(phba
, mboxq
)) {
5477 goto out_free_mboxq
;
5480 mp
= (struct lpfc_dmabuf
*)mboxq
->ctx_buf
;
5481 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
5483 lpfc_printf_log(phba
, KERN_INFO
, LOG_MBOX
| LOG_SLI
,
5484 "(%d):2571 Mailbox cmd x%x Status x%x "
5485 "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5486 "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5487 "CQ: x%x x%x x%x x%x\n",
5488 mboxq
->vport
? mboxq
->vport
->vpi
: 0,
5489 bf_get(lpfc_mqe_command
, mqe
),
5490 bf_get(lpfc_mqe_status
, mqe
),
5491 mqe
->un
.mb_words
[0], mqe
->un
.mb_words
[1],
5492 mqe
->un
.mb_words
[2], mqe
->un
.mb_words
[3],
5493 mqe
->un
.mb_words
[4], mqe
->un
.mb_words
[5],
5494 mqe
->un
.mb_words
[6], mqe
->un
.mb_words
[7],
5495 mqe
->un
.mb_words
[8], mqe
->un
.mb_words
[9],
5496 mqe
->un
.mb_words
[10], mqe
->un
.mb_words
[11],
5497 mqe
->un
.mb_words
[12], mqe
->un
.mb_words
[13],
5498 mqe
->un
.mb_words
[14], mqe
->un
.mb_words
[15],
5499 mqe
->un
.mb_words
[16], mqe
->un
.mb_words
[50],
5501 mboxq
->mcqe
.mcqe_tag0
, mboxq
->mcqe
.mcqe_tag1
,
5502 mboxq
->mcqe
.trailer
);
5505 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
5508 goto out_free_mboxq
;
5510 data_length
= mqe
->un
.mb_words
[5];
5511 if (data_length
> DMP_RGN23_SIZE
) {
5512 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
5515 goto out_free_mboxq
;
5518 lpfc_parse_fcoe_conf(phba
, mp
->virt
, data_length
);
5519 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
5524 mempool_free(mboxq
, phba
->mbox_mem_pool
);
/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *	      On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command. In
 * addition, this routine gets the port vpd data.
 *
 * Return codes
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		   uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
5549 struct lpfc_dmabuf
*dmabuf
;
5550 struct lpfc_mqe
*mqe
;
5552 dmabuf
= kzalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
5557 * Get a DMA buffer for the vpd data resulting from the READ_REV
5560 dma_size
= *vpd_size
;
5561 dmabuf
->virt
= dma_alloc_coherent(&phba
->pcidev
->dev
, dma_size
,
5562 &dmabuf
->phys
, GFP_KERNEL
);
5563 if (!dmabuf
->virt
) {
5569 * The SLI4 implementation of READ_REV conflicts at word1,
5570 * bits 31:16 and SLI4 adds vpd functionality not present
5571 * in SLI3. This code corrects the conflicts.
5573 lpfc_read_rev(phba
, mboxq
);
5574 mqe
= &mboxq
->u
.mqe
;
5575 mqe
->un
.read_rev
.vpd_paddr_high
= putPaddrHigh(dmabuf
->phys
);
5576 mqe
->un
.read_rev
.vpd_paddr_low
= putPaddrLow(dmabuf
->phys
);
5577 mqe
->un
.read_rev
.word1
&= 0x0000FFFF;
5578 bf_set(lpfc_mbx_rd_rev_vpd
, &mqe
->un
.read_rev
, 1);
5579 bf_set(lpfc_mbx_rd_rev_avail_len
, &mqe
->un
.read_rev
, dma_size
);
5581 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
5583 dma_free_coherent(&phba
->pcidev
->dev
, dma_size
,
5584 dmabuf
->virt
, dmabuf
->phys
);
5590 * The available vpd length cannot be bigger than the
5591 * DMA buffer passed to the port. Catch the less than
5592 * case and update the caller's size.
5594 if (mqe
->un
.read_rev
.avail_vpd_len
< *vpd_size
)
5595 *vpd_size
= mqe
->un
.read_rev
.avail_vpd_len
;
5597 memcpy(vpd
, dmabuf
->virt
, *vpd_size
);
5599 dma_free_coherent(&phba
->pcidev
->dev
, dma_size
,
5600 dmabuf
->virt
, dmabuf
->phys
);
/**
 * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the SLI4 device controller attributes for the
 * port this PCI function is attached to.
 *
 * Return codes
 *      0 - successful
 *      otherwise - failed to retrieve controller attributes
 **/
5617 lpfc_sli4_get_ctl_attr(struct lpfc_hba
*phba
)
5619 LPFC_MBOXQ_t
*mboxq
;
5620 struct lpfc_mbx_get_cntl_attributes
*mbx_cntl_attr
;
5621 struct lpfc_controller_attribute
*cntl_attr
;
5622 void *virtaddr
= NULL
;
5623 uint32_t alloclen
, reqlen
;
5624 uint32_t shdr_status
, shdr_add_status
;
5625 union lpfc_sli4_cfg_shdr
*shdr
;
5628 mboxq
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5632 /* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5633 reqlen
= sizeof(struct lpfc_mbx_get_cntl_attributes
);
5634 alloclen
= lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_COMMON
,
5635 LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES
, reqlen
,
5636 LPFC_SLI4_MBX_NEMBED
);
5638 if (alloclen
< reqlen
) {
5639 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
5640 "3084 Allocated DMA memory size (%d) is "
5641 "less than the requested DMA memory size "
5642 "(%d)\n", alloclen
, reqlen
);
5644 goto out_free_mboxq
;
5646 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
5647 virtaddr
= mboxq
->sge_array
->addr
[0];
5648 mbx_cntl_attr
= (struct lpfc_mbx_get_cntl_attributes
*)virtaddr
;
5649 shdr
= &mbx_cntl_attr
->cfg_shdr
;
5650 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
5651 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
5652 if (shdr_status
|| shdr_add_status
|| rc
) {
5653 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
5654 "3085 Mailbox x%x (x%x/x%x) failed, "
5655 "rc:x%x, status:x%x, add_status:x%x\n",
5656 bf_get(lpfc_mqe_command
, &mboxq
->u
.mqe
),
5657 lpfc_sli_config_mbox_subsys_get(phba
, mboxq
),
5658 lpfc_sli_config_mbox_opcode_get(phba
, mboxq
),
5659 rc
, shdr_status
, shdr_add_status
);
5661 goto out_free_mboxq
;
5664 cntl_attr
= &mbx_cntl_attr
->cntl_attr
;
5665 phba
->sli4_hba
.lnk_info
.lnk_dv
= LPFC_LNK_DAT_VAL
;
5666 phba
->sli4_hba
.lnk_info
.lnk_tp
=
5667 bf_get(lpfc_cntl_attr_lnk_type
, cntl_attr
);
5668 phba
->sli4_hba
.lnk_info
.lnk_no
=
5669 bf_get(lpfc_cntl_attr_lnk_numb
, cntl_attr
);
5671 memset(phba
->BIOSVersion
, 0, sizeof(phba
->BIOSVersion
));
5672 strlcat(phba
->BIOSVersion
, (char *)cntl_attr
->bios_ver_str
,
5673 sizeof(phba
->BIOSVersion
));
5675 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
5676 "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s\n",
5677 phba
->sli4_hba
.lnk_info
.lnk_tp
,
5678 phba
->sli4_hba
.lnk_info
.lnk_no
,
5681 if (rc
!= MBX_TIMEOUT
) {
5682 if (bf_get(lpfc_mqe_command
, &mboxq
->u
.mqe
) == MBX_SLI4_CONFIG
)
5683 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
5685 mempool_free(mboxq
, phba
->mbox_mem_pool
);
/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the SLI4 device physical port name that this PCI
 * function is attached to.
 *
 * Return codes
 *      0 - successful
 *      otherwise - failed to retrieve physical port name
 **/
5702 lpfc_sli4_retrieve_pport_name(struct lpfc_hba
*phba
)
5704 LPFC_MBOXQ_t
*mboxq
;
5705 struct lpfc_mbx_get_port_name
*get_port_name
;
5706 uint32_t shdr_status
, shdr_add_status
;
5707 union lpfc_sli4_cfg_shdr
*shdr
;
5708 char cport_name
= 0;
5711 /* We assume nothing at this point */
5712 phba
->sli4_hba
.lnk_info
.lnk_dv
= LPFC_LNK_DAT_INVAL
;
5713 phba
->sli4_hba
.pport_name_sta
= LPFC_SLI4_PPNAME_NON
;
5715 mboxq
= (LPFC_MBOXQ_t
*)mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
5718 /* obtain link type and link number via READ_CONFIG */
5719 phba
->sli4_hba
.lnk_info
.lnk_dv
= LPFC_LNK_DAT_INVAL
;
5720 lpfc_sli4_read_config(phba
);
5721 if (phba
->sli4_hba
.lnk_info
.lnk_dv
== LPFC_LNK_DAT_VAL
)
5722 goto retrieve_ppname
;
5724 /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
5725 rc
= lpfc_sli4_get_ctl_attr(phba
);
5727 goto out_free_mboxq
;
5730 lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_COMMON
,
5731 LPFC_MBOX_OPCODE_GET_PORT_NAME
,
5732 sizeof(struct lpfc_mbx_get_port_name
) -
5733 sizeof(struct lpfc_sli4_cfg_mhdr
),
5734 LPFC_SLI4_MBX_EMBED
);
5735 get_port_name
= &mboxq
->u
.mqe
.un
.get_port_name
;
5736 shdr
= (union lpfc_sli4_cfg_shdr
*)&get_port_name
->header
.cfg_shdr
;
5737 bf_set(lpfc_mbox_hdr_version
, &shdr
->request
, LPFC_OPCODE_VERSION_1
);
5738 bf_set(lpfc_mbx_get_port_name_lnk_type
, &get_port_name
->u
.request
,
5739 phba
->sli4_hba
.lnk_info
.lnk_tp
);
5740 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
5741 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
5742 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
5743 if (shdr_status
|| shdr_add_status
|| rc
) {
5744 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
5745 "3087 Mailbox x%x (x%x/x%x) failed: "
5746 "rc:x%x, status:x%x, add_status:x%x\n",
5747 bf_get(lpfc_mqe_command
, &mboxq
->u
.mqe
),
5748 lpfc_sli_config_mbox_subsys_get(phba
, mboxq
),
5749 lpfc_sli_config_mbox_opcode_get(phba
, mboxq
),
5750 rc
, shdr_status
, shdr_add_status
);
5752 goto out_free_mboxq
;
5754 switch (phba
->sli4_hba
.lnk_info
.lnk_no
) {
5755 case LPFC_LINK_NUMBER_0
:
5756 cport_name
= bf_get(lpfc_mbx_get_port_name_name0
,
5757 &get_port_name
->u
.response
);
5758 phba
->sli4_hba
.pport_name_sta
= LPFC_SLI4_PPNAME_GET
;
5760 case LPFC_LINK_NUMBER_1
:
5761 cport_name
= bf_get(lpfc_mbx_get_port_name_name1
,
5762 &get_port_name
->u
.response
);
5763 phba
->sli4_hba
.pport_name_sta
= LPFC_SLI4_PPNAME_GET
;
5765 case LPFC_LINK_NUMBER_2
:
5766 cport_name
= bf_get(lpfc_mbx_get_port_name_name2
,
5767 &get_port_name
->u
.response
);
5768 phba
->sli4_hba
.pport_name_sta
= LPFC_SLI4_PPNAME_GET
;
5770 case LPFC_LINK_NUMBER_3
:
5771 cport_name
= bf_get(lpfc_mbx_get_port_name_name3
,
5772 &get_port_name
->u
.response
);
5773 phba
->sli4_hba
.pport_name_sta
= LPFC_SLI4_PPNAME_GET
;
5779 if (phba
->sli4_hba
.pport_name_sta
== LPFC_SLI4_PPNAME_GET
) {
5780 phba
->Port
[0] = cport_name
;
5781 phba
->Port
[1] = '\0';
5782 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
5783 "3091 SLI get port name: %s\n", phba
->Port
);
5787 if (rc
!= MBX_TIMEOUT
) {
5788 if (bf_get(lpfc_mqe_command
, &mboxq
->u
.mqe
) == MBX_SLI4_CONFIG
)
5789 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
5791 mempool_free(mboxq
, phba
->mbox_mem_pool
);
/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int qidx;
5807 struct lpfc_sli4_hba
*sli4_hba
= &phba
->sli4_hba
;
5808 struct lpfc_sli4_hdw_queue
*qp
;
5809 struct lpfc_queue
*eq
;
5811 sli4_hba
->sli4_write_cq_db(phba
, sli4_hba
->mbx_cq
, 0, LPFC_QUEUE_REARM
);
5812 sli4_hba
->sli4_write_cq_db(phba
, sli4_hba
->els_cq
, 0, LPFC_QUEUE_REARM
);
5813 if (sli4_hba
->nvmels_cq
)
5814 sli4_hba
->sli4_write_cq_db(phba
, sli4_hba
->nvmels_cq
, 0,
5817 if (sli4_hba
->hdwq
) {
5818 /* Loop thru all Hardware Queues */
5819 for (qidx
= 0; qidx
< phba
->cfg_hdw_queue
; qidx
++) {
5820 qp
= &sli4_hba
->hdwq
[qidx
];
5821 /* ARM the corresponding CQ */
5822 sli4_hba
->sli4_write_cq_db(phba
, qp
->io_cq
, 0,
5826 /* Loop thru all IRQ vectors */
5827 for (qidx
= 0; qidx
< phba
->cfg_irq_chann
; qidx
++) {
5828 eq
= sli4_hba
->hba_eq_hdl
[qidx
].eq
;
5829 /* ARM the corresponding EQ */
5830 sli4_hba
->sli4_write_eq_db(phba
, eq
,
5831 0, LPFC_QUEUE_REARM
);
5835 if (phba
->nvmet_support
) {
5836 for (qidx
= 0; qidx
< phba
->cfg_nvmet_mrq
; qidx
++) {
5837 sli4_hba
->sli4_write_cq_db(phba
,
5838 sli4_hba
->nvmet_cqset
[qidx
], 0,
/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful.  Nonzero otherwise.
 **/
static int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
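
/*
 * Usage sketch (illustrative only, not part of the driver): a caller that
 * wants to know how many XRI extents the port can provide might do the
 * following.  The local variable names are assumptions made for the example.
 *
 *	uint16_t ext_cnt = 0, ext_size = 0;
 *	int rc;
 *
 *	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
 *					    &ext_cnt, &ext_size);
 *	if (!rc)
 *		total_xris = ext_cnt * ext_size;
 */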
/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function reads the current available extents from the port and checks
 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   -Error: error indicates problem.
 *        1: Extent count or size has changed.
 *        0: No changes.
 **/
static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
{
	uint16_t curr_ext_cnt, rsrc_ext_cnt;
	uint16_t size_diff, rsrc_ext_size;
	int rc = 0;
	struct lpfc_rsrc_blks *rsrc_entry;
	struct list_head *rsrc_blk_list = NULL;

	size_diff = 0;
	curr_ext_cnt = 0;
	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_ext_cnt,
					    &rsrc_ext_size);
	if (unlikely(rc))
		return -EIO;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		break;
	}

	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
		curr_ext_cnt++;
		if (rsrc_entry->rsrc_size != rsrc_ext_size)
			size_diff++;
	}

	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
		rc = 1;

	return rc;
}
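
/*
 * Illustrative use of the return convention above (not part of the driver):
 * a post-reset path could reprovision only when a change is reported.
 * reallocate_extents() is a hypothetical helper used only for the example.
 *
 *	rc = lpfc_sli4_chk_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI);
 *	if (rc < 0)
 *		return rc;		// mailbox or port error
 *	if (rc == 1)
 *		reallocate_extents();	// count or size changed since last read
 */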
/**
 * lpfc_sli4_cfg_post_extnts -
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 * @mbox: pointer to the caller's allocated mailbox structure.
 *
 * This function executes the extents allocation request.  It also
 * takes care of the amount of memory needed to allocate or get the
 * allocated extents.  It is the caller's responsibility to evaluate
 * the response.
 *
 * Returns:
 *   0: Success.
 *   -Error:  Error value describes the condition found.
 **/
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
	int rc = 0;
	uint32_t req_len;
	uint32_t emb_len;
	uint32_t alloc_len, mbox_tmo;

	/* Calculate the total requested length of the dma memory */
	req_len = extnt_cnt * sizeof(uint16_t);

	/*
	 * Calculate the size of an embedded mailbox.  The uint32_t
	 * accounts for extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
	 */
	*emb = LPFC_SLI4_MBX_EMBED;
	if (req_len > emb_len) {
		req_len = extnt_cnt * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		*emb = LPFC_SLI4_MBX_NEMBED;
	}

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
				     req_len, *emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2982 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
	if (unlikely(rc))
		return -EIO;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc))
		rc = -EIO;
	return rc;
}
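
/*
 * Worked example of the sizing logic above (illustrative only): each
 * requested extent ID is returned as a uint16_t, so the response needs
 * 2 * extnt_cnt bytes.  If that fits under emb_len (the embedded mailbox
 * payload less the header and the extents-specific word), the command is
 * issued embedded; otherwise the request is enlarged by the non-embedded
 * config header plus the extra word and reissued as LPFC_SLI4_MBX_NEMBED.
 *
 *	req_len = extnt_cnt * sizeof(uint16_t);	// e.g. 16 extents -> 32 bytes
 *	if (req_len > emb_len)			// too big for an embedded MQE
 *		*emb = LPFC_SLI4_MBX_NEMBED;
 */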
/**
 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to allocate.
 *
 * This function allocates the number of elements for the specified
 * resource type.
 **/
static int
lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	bool emb = false;
	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
	uint16_t rsrc_id, rsrc_start, j, k;
	uint16_t *ids;
	int i, rc;
	unsigned long longs;
	unsigned long *bmask;
	struct lpfc_rsrc_blks *rsrc_blks;
	LPFC_MBOXQ_t *mbox;
	uint32_t length;
	struct lpfc_id_range *id_array = NULL;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	struct list_head *ext_blk_list;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_cnt,
					    &rsrc_size);
	if (unlikely(rc))
		return -EIO;

	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"3009 No available Resource Extents "
			"for resource type 0x%x: Count: 0x%x, "
			"Size 0x%x\n", type, rsrc_cnt,
			rsrc_size);
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
			"2903 Post resource extents type-0x%x: "
			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located.  Then get local pointers
	 * to the response data.  The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		id_array = &rsrc_ext->u.rsp.id[0];
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
		id_array = &n_rsrc->id;
	}

	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	rsrc_id_cnt = rsrc_cnt * rsrc_size;

	/*
	 * Based on the resource size and count, correct the base and max
	 * resource values.
	 */
	length = sizeof(struct lpfc_rsrc_blks);
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			kfree(phba->sli4_hba.rpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/*
		 * The next_rpi was initialized with the maximum available
		 * count but the port may allocate a smaller number.  Catch
		 * that case and update the next_rpi.
		 */
		phba->sli4_hba.next_rpi = rsrc_id_cnt;

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.rpi_bmask;
		ids = phba->sli4_hba.rpi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			kfree(phba->vpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->vpi_bmask;
		ids = phba->vpi_ids;
		ext_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			kfree(phba->sli4_hba.xri_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.xri_bmask;
		ids = phba->sli4_hba.xri_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			kfree(phba->sli4_hba.vfi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.vfi_bmask;
		ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported Opcode.  Fail call. */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		goto err_exit;
	}

	/*
	 * Complete initializing the extent configuration with the
	 * allocated ids assigned to this function.  The bitmask serves
	 * as an index into the array and manages the available ids.  The
	 * array just stores the ids communicated to the port via the wqes.
	 */
	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
		if ((i % 2) == 0)
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
					 &id_array[k]);
		else
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
					 &id_array[k]);

		rsrc_blks = kzalloc(length, GFP_KERNEL);
		if (unlikely(!rsrc_blks)) {
			rc = -ENOMEM;
			kfree(bmask);
			kfree(ids);
			goto err_exit;
		}
		rsrc_blks->rsrc_start = rsrc_id;
		rsrc_blks->rsrc_size = rsrc_size;
		list_add_tail(&rsrc_blks->list, ext_blk_list);
		rsrc_start = rsrc_id;
		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
			phba->sli4_hba.io_xri_start = rsrc_start +
				lpfc_sli4_get_iocb_cnt(phba);
		}

		while (rsrc_id < (rsrc_start + rsrc_size)) {
			ids[j] = rsrc_id;
			rsrc_id++;
			j++;
		}
		/* Entire word processed.  Get next word.*/
		if ((i % 2) == 1)
			k++;
	}
err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
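
/*
 * Illustrative note on the data layout built above (not part of the
 * driver): for one extent starting at ID 100 with rsrc_size 64, the loop
 * stores IDs 100..163 in ids[0..63], while bmask tracks which of those
 * slots are currently in use.  Lookup is therefore by index, e.g.:
 *
 *	idx = find_first_zero_bit(bmask, rsrc_id_cnt);
 *	hw_id = ids[idx];		// ID to place in the WQE
 *
 * find_first_zero_bit() is the generic kernel bitmap helper; the locking
 * the driver performs around the real allocators is omitted here.
 */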
/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type.
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range.  It
 * is the caller's responsibility to release all kernel memory resources.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends the
	 * resource type.  All extents of this type are released by the port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				    &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				    &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Release kernel memory resources for the specific type. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/* RPI bitmask and physical id array are cleaned up earlier. */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
void
lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
		  uint32_t feature)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_feature) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
			 LPFC_SLI4_MBX_EMBED);

	switch (feature) {
	case LPFC_SET_UE_RECOVERY:
		bf_set(lpfc_mbx_set_feature_UER,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	case LPFC_SET_MDS_DIAGS:
		bf_set(lpfc_mbx_set_feature_mds,
		       &mbox->u.mqe.un.set_feature, 1);
		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
		       &mbox->u.mqe.un.set_feature, 1);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
		mbox->u.mqe.un.set_feature.param_len = 8;
		break;
	case LPFC_SET_DUAL_DUMP:
		bf_set(lpfc_mbx_set_feature_dd,
		       &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
		bf_set(lpfc_mbx_set_feature_ddquery,
		       &mbox->u.mqe.un.set_feature, 0);
		mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
		mbox->u.mqe.un.set_feature.param_len = 4;
		break;
	default:
		break;
	}

	return;
}
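
/*
 * Example usage (mirrors how lpfc_sli4_hba_setup() later in this file
 * drives the helper): build the SET_FEATURES request, then issue it with
 * a polled mailbox and react to the completion status.
 *
 *	lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		phba->mds_diags_support = 0;
 */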
/**
 * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
 * @phba: Pointer to HBA context object.
 *
 * Disable FW logging into host memory on the adapter.  To
 * be done before reading logs from the host memory.
 **/
void
lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Disable FW logging to host memory */
	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);

	/* Wait 10ms for firmware to stop using DMA buffer */
	usleep_range(10 * 1000, 20 * 1000);
}
/**
 * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
 * @phba: Pointer to HBA context object.
 *
 * This function is called to free memory allocated for RAS FW logging
 * support in the driver.
 **/
static void
lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf, *next;

	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
		list_for_each_entry_safe(dmabuf, next,
					 &ras_fwlog->fwlog_buff_list,
					 list) {
			list_del(&dmabuf->list);
			dma_free_coherent(&phba->pcidev->dev,
					  LPFC_RAS_MAX_ENTRY_SIZE,
					  dmabuf->virt, dmabuf->phys);
			kfree(dmabuf);
		}
	}

	if (ras_fwlog->lwpd.virt) {
		dma_free_coherent(&phba->pcidev->dev,
				  sizeof(uint32_t) * 2,
				  ras_fwlog->lwpd.virt,
				  ras_fwlog->lwpd.phys);
		ras_fwlog->lwpd.virt = NULL;
	}

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_ras_dma_alloc: Allocate memory for FW support
 * @phba: Pointer to HBA context object.
 * @fwlog_buff_count: Count of buffers to be created.
 *
 * This routine allocates DMA memory for the Log Write Position Data [LWPD]
 * and for the buffers posted to the adapter so that it can write FW log
 * entries.  Buffer count is calculated based on module param
 * ras_fwlog_buffsize.  Size of each buffer posted to FW is 64K.
 **/
static int
lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
			uint32_t fwlog_buff_count)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_dmabuf *dmabuf;
	int rc = 0, i = 0;

	/* Initialize List */
	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);

	/* Allocate memory for the LWPD */
	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
					    sizeof(uint32_t) * 2,
					    &ras_fwlog->lwpd.phys,
					    GFP_KERNEL);
	if (!ras_fwlog->lwpd.virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6185 LWPD Memory Alloc Failed\n");
		return -ENOMEM;
	}

	ras_fwlog->fw_buffcount = fwlog_buff_count;
	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
				 GFP_KERNEL);
		if (!dmabuf) {
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6186 Memory Alloc failed FW logging");
			goto free_mem;
		}

		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  LPFC_RAS_MAX_ENTRY_SIZE,
						  &dmabuf->phys, GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			rc = -ENOMEM;
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6187 DMA Alloc Failed FW logging");
			goto free_mem;
		}
		dmabuf->buffer_tag = i;
		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
	}

	return rc;

free_mem:
	lpfc_sli4_ras_dma_free(phba);
	return rc;
}
/**
 * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @pmb: pointer to the driver internal queue element for mailbox command.
 *
 * Completion handler for driver's RAS MBX command to the device.
 **/
static void
lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	MAILBOX_t *mb;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	mb = &pmb->u.mb;

	shdr = (union lpfc_sli4_cfg_shdr *)
		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6188 FW LOG mailbox "
				"completed with status x%x add_status x%x,"
				" mbx status x%x\n",
				shdr_status, shdr_add_status, mb->mbxStatus);

		ras_fwlog->ras_hwsupport = false;
		goto disable_ras;
	}

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);

	return;

disable_ras:
	/* Free RAS DMA memory */
	lpfc_sli4_ras_dma_free(phba);
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
 * @phba: pointer to lpfc hba data structure.
 * @fwlog_level: Logging verbosity level.
 * @fwlog_enable: Enable/Disable logging.
 *
 * Initialize memory and post mailbox command to enable FW logging in host
 * memory.
 **/
int
lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
			 uint32_t fwlog_level,
			 uint32_t fwlog_enable)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
	int rc = 0;

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = INACTIVE;
	spin_unlock_irq(&phba->hbalock);

	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
			  phba->cfg_ras_fwlog_buffsize);
	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);

	/*
	 * If re-enabling FW logging support use earlier allocated
	 * DMA buffers while posting MBX command.
	 */
	if (!ras_fwlog->lwpd.virt) {
		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"6189 FW Log Memory Allocation Failed");
			return rc;
		}
	}

	/* Setup Mailbox command */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6190 RAS MBX Alloc Failed");
		rc = -ENOMEM;
		goto mem_free;
	}

	ras_fwlog->fw_loglevel = fwlog_level;
	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
		sizeof(struct lpfc_sli4_cfg_mhdr));

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
			 len, LPFC_SLI4_MBX_EMBED);

	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
	       fwlog_enable);
	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
	       ras_fwlog->fw_loglevel);
	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
	       ras_fwlog->fw_buffcount);
	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);

	/* Update DMA buffer address */
	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);

		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}

	/* Update LPWD address */
	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);

	spin_lock_irq(&phba->hbalock);
	ras_fwlog->state = REG_INPROGRESS;
	spin_unlock_irq(&phba->hbalock);
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6191 FW-Log Mailbox failed. "
				"status %d mbxStatus : x%x", rc,
				bf_get(lpfc_mqe_status, &mbox->u.mqe));
		mempool_free(mbox, phba->mbox_mem_pool);
		rc = -EIO;
		goto mem_free;
	} else
		rc = 0;
mem_free:
	if (rc)
		lpfc_sli4_ras_dma_free(phba);

	return rc;
}
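
/*
 * Worked sizing example for the buffer math above (illustrative only):
 * with cfg_ras_fwlog_buffsize set to N, the total log area is
 * N * LPFC_RAS_MIN_BUFF_POST_SIZE bytes, carved into
 *
 *	fwlog_entry_count = (N * LPFC_RAS_MIN_BUFF_POST_SIZE) /
 *			    LPFC_RAS_MAX_ENTRY_SIZE
 *
 * DMA buffers of LPFC_RAS_MAX_ENTRY_SIZE (64K) each, which is the count
 * handed to lpfc_sli4_ras_dma_alloc().
 */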
/**
 * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
 * @phba: Pointer to HBA context object.
 *
 * Check if RAS is supported on the adapter and initialize it.
 **/
void
lpfc_sli4_ras_setup(struct lpfc_hba *phba)
{
	/* Check RAS FW Log needs to be enabled or not */
	if (lpfc_check_fwlog_support(phba))
		return;

	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
				 LPFC_RAS_ENABLE_LOGGING);
}
/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function allocates all SLI4 resource identifiers.
 **/
int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
{
	int i, rc, error = 0;
	uint16_t count, base;
	unsigned long longs;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	if (phba->sli4_hba.extents_in_use) {
		/*
		 * The port supports resource extents. The XRI, VPI, VFI, RPI
		 * resource extent count must be read and allocated before
		 * provisioning the resource id arrays.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			/*
			 * Extent-based resources are set - the driver could
			 * be in a port reset. Figure out if any corrective
			 * actions need to be taken.
			 */
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			if (rc != 0)
				error++;

			/*
			 * It's possible that the number of resources
			 * provided to this port instance changed between
			 * resets.  Detect this condition and reallocate
			 * resources.  Otherwise, there is no action.
			 */
			if (error) {
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_INIT,
						"2931 Detected extent resource "
						"change.  Reallocating all "
						"extents.\n");
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			} else
				return 0;
		}

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		if (unlikely(rc))
			goto err_exit;
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return rc;
	} else {
		/*
		 * The port does not support resource extents.  The XRI, VPI,
		 * VFI, RPI resource ids were determined from READ_CONFIG.
		 * Just allocate the bitmasks and provision the resource id
		 * arrays.  If a port reset is active, the resources don't
		 * need any action - just exit.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			lpfc_sli4_dealloc_resource_identifiers(phba);
			lpfc_sli4_remove_rpis(phba);
		}
		/* RPIs. */
		count = phba->sli4_hba.max_cfg_param.max_rpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3279 Invalid provisioning of "
					"rpi:%d\n", count);
			rc = -EINVAL;
			goto err_exit;
		}
		base = phba->sli4_hba.max_cfg_param.rpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.rpi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			rc = -ENOMEM;
			goto free_rpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.rpi_ids[i] = base + i;

		/* VPIs. */
		count = phba->sli4_hba.max_cfg_param.max_vpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3280 Invalid provisioning of "
					"vpi:%d\n", count);
			rc = -EINVAL;
			goto free_rpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto free_rpi_ids;
		}
		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			rc = -ENOMEM;
			goto free_vpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->vpi_ids[i] = base + i;

		/* XRIs. */
		count = phba->sli4_hba.max_cfg_param.max_xri;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3281 Invalid provisioning of "
					"xri:%d\n", count);
			rc = -EINVAL;
			goto free_vpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.xri_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.xri_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto free_vpi_ids;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			rc = -ENOMEM;
			goto free_xri_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.xri_ids[i] = base + i;

		/* VFIs. */
		count = phba->sli4_hba.max_cfg_param.max_vfi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3282 Invalid provisioning of "
					"vfi:%d\n", count);
			rc = -EINVAL;
			goto free_xri_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vfi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.vfi_bmask = kcalloc(longs,
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto free_xri_ids;
		}
		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			rc = -ENOMEM;
			goto free_vfi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.vfi_ids[i] = base + i;

		/*
		 * Mark all resources ready.  An HBA reset doesn't need
		 * to reset the initialization.
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
 free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
 err_exit:
	return rc;
}
/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function deallocates all previously allocated SLI4 resource
 * identifiers.
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}
/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold port extent count response
 * @extnt_size: buffer to hold port extent size response.
 *
 * This function calls the port to read the host allocated extents
 * for a particular type.
 **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type. */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count.  The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox.  The uint32_t
	 * accounts for extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
	 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = curr_blks * sizeof(uint16_t);
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2983 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located.  Then get local pointers
	 * to the response data.  The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"2984 Failed to read allocated resources "
			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
			type,
			bf_get(lpfc_mbox_hdr_status, &shdr->response),
			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
		goto err_exit;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
/**
 * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 * @sgl_list: linked link of sgl buffers to post
 * @cnt: number of linked list buffers
 *
 * This routine walks the list of buffers that have been allocated and
 * reposts them to the port by using SGL block post.  This is needed after a
 * pci_function_reset/warm_start or start.  It attempts to construct blocks
 * of buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port.  For single
 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
 * mailbox command for posting.
 *
 * Returns: 0 = success, non-zero failure.
 **/
static int
lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
			  struct list_head *sgl_list, int cnt)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt;
	int post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(sgl_list, &allc_sgl_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* keep track of last sgl's xritag */
		last_xritag = sglq_entry->sli4_xritag;

		/* end of repost sgl list condition for buffers */
		if (num_posted == total_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* successful, put sgl to posted list */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure, put sgl to free list */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post the buffer list sgls as a block */
		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
						 post_cnt);

		if (!status) {
			/* success, put sgl list to posted sgl list */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure, put sgl list to free sgl list */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset sgl post count for next round of posting */
		post_cnt = 0;
	}

	/* free the sgls failed to post */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* push sgls posted to the available list */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&post_sgl_list, sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3161 Failure to post sgl to port.\n");
		return -EIO;
	}

	/* return the number of XRIs actually posted */
	return total_cnt;
}
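
/*
 * Illustrative example of the grouping policy above (not part of the
 * driver): given buffers with XRIs 10, 11, 12, 20, 21, the walk batches
 * {10,11,12} for one non-embedded SGL block post, starts a new block at
 * the hole before 20, and posts {20,21} as the final block; a block that
 * ends up with exactly one entry falls back to the single embedded
 * lpfc_sli4_post_sgl() path instead.
 */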
/**
 * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of nvme buffers that have been allocated and
 * reposts them to the port by using SGL block post.  This is needed after a
 * pci_function_reset/warm_start or start.  The lpfc_hba_down_post_s4 routine
 * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
 * to the lpfc_io_buf_list.  If the repost fails, reject all nvme buffers.
 *
 * Returns: 0 = success, non-zero failure.
 **/
static int
lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(post_nblist);
	int num_posted, rc = 0;

	/* get all NVME buffers need to repost to a local list */
	lpfc_io_buf_flush(phba, &post_nblist);

	/* post the list of nvme buffer sgls to port if available */
	if (!list_empty(&post_nblist)) {
		num_posted = lpfc_sli4_post_io_sgl_list(
			phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
		/* failed to post any nvme buffer, return error */
		if (num_posted == 0)
			rc = -EIO;
	}
	return rc;
}
static void
lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t len;

	len = sizeof(struct lpfc_mbx_set_host_data) -
		sizeof(struct lpfc_sli4_cfg_mhdr);
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
			 LPFC_SLI4_MBX_EMBED);

	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
	mbox->u.mqe.un.set_host_data.param_len =
					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
	snprintf(mbox->u.mqe.un.set_host_data.data,
		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
		 "Linux %s v"LPFC_DRIVER_VERSION,
		 (phba->hba_flag & HBA_FCOE_MODE) ? "FCoE" : "FC");
}
static int
lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		    struct lpfc_queue *drq, int count, int idx)
{
	int rc, i;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;
	struct lpfc_rqb *rqbp;
	unsigned long flags;
	struct rqb_dmabuf *rqb_buffer;
	LIST_HEAD(rqb_buf_list);

	rqbp = hrq->rqbp;
	for (i = 0; i < count; i++) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* IF RQ is already full, don't bother */
		if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			break;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);

		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
		if (!rqb_buffer)
			break;
		rqb_buffer->hrq = hrq;
		rqb_buffer->drq = drq;
		rqb_buffer->idx = idx;
		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
	}

	spin_lock_irqsave(&phba->hbalock, flags);
	while (!list_empty(&rqb_buf_list)) {
		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
				 hbuf.list);

		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
		if (rc < 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6421 Cannot post to HRQ %d: %x %x %x "
					"DRQ %x %x\n",
					hrq->queue_id,
					hrq->host_index,
					hrq->hba_index,
					hrq->entry_count,
					drq->host_index,
					drq->hba_index);
			rqbp->rqb_free_buffer(phba, rqb_buffer);
		} else {
			list_add_tail(&rqb_buffer->hbuf.list,
				      &rqbp->rqb_buffer_list);
			rqbp->buffer_count++;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return 1;
}
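
/*
 * Usage sketch (illustrative, mirroring the NVMET setup later in this
 * file): after initializing an rqbp for a header/data RQ pair, post an
 * initial batch of receive buffers to it.
 *
 *	lpfc_post_rq_buffer(phba, phba->sli4_hba.nvmet_mrq_hdr[i],
 *			    phba->sli4_hba.nvmet_mrq_data[i],
 *			    phba->cfg_nvmet_mrq_post, i);
 */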
/**
 * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine initializes the per-cq idle_stat to dynamically dictate
 * polling decisions.
 *
 * Return codes:
 *   None
 **/
static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
{
	int i;
	struct lpfc_sli4_hdw_queue *hdwq;
	struct lpfc_queue *cq;
	struct lpfc_idle_stat *idle_stat;
	u64 wall;

	for_each_present_cpu(i) {
		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
		cq = hdwq->io_cq;

		/* Skip if we've already handled this cq's primary CPU */
		if (cq->chann != i)
			continue;

		idle_stat = &phba->sli4_hba.idle_stat[i];

		idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
		idle_stat->prev_wall = wall;

		if (phba->nvmet_support)
			cq->poll_mode = LPFC_QUEUE_WORK;
		else
			cq->poll_mode = LPFC_IRQ_POLL;
	}

	if (!phba->nvmet_support)
		schedule_delayed_work(&phba->idle_stat_delay_work,
				      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
}
static void lpfc_sli4_dip(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
	    if_type == LPFC_SLI_INTF_IF_TYPE_6) {
		struct lpfc_register reg_data;

		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			       &reg_data.word0))
			return;

		if (bf_get(lpfc_sliport_status_dip, &reg_data))
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2904 Firmware Dump Image Present"
					" on Adapter");
	}
}

/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc, i, cnt, len, dd;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;
	struct lpfc_rqb *rqbp;

	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readyness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	lpfc_sli4_dip(phba);

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
		phba->hba_flag |= HBA_FCOE_MODE;
		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
	} else {
		phba->hba_flag &= ~HBA_FCOE_MODE;
	}

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	phba->hba_flag &= ~HBA_IOQ_FLUSH;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions, only read parameters if the
	 * board is FCoE
	 */
	if (phba->hba_flag & HBA_FCOE_MODE &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/*
	 * Retrieve sli4 device physical port name, failure of doing it
	 * is considered as non-fatal.
	 */
	rc = lpfc_sli4_retrieve_pport_name(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"3080 Successful retrieving SLI4 device "
				"physical port name: %s.\n", phba->Port);

	rc = lpfc_sli4_get_ctl_attr(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"8351 Successful retrieving SLI4 device "
				"CTL ATTR\n");

	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;

	/*
	 * This is because first G7 ASIC doesn't support the standard
	 * 0x5a NVME cmd descriptor type/subtype
	 */
	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
			LPFC_SLI_INTF_IF_TYPE_6) &&
	    (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
	    (phba->vpd.rev.smRev == 0) &&
	    (phba->cfg_nvme_embed_cmd == 1))
		phba->cfg_nvme_embed_cmd = 0;

	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					 &mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS) {
			phba->hba_flag |= HBA_RECOVERABLE_UE;
			/* Set 1Sec interval to detect UE */
			phba->eratt_poll_interval = 1;
			phba->sli4_hba.ue_to_sr = bf_get(
					lpfc_mbx_set_feature_UESR,
					&mboxq->u.mqe.un.set_feature);
			phba->sli4_hba.ue_to_rp = bf_get(
					lpfc_mbx_set_feature_UERP,
					&mboxq->u.mqe.un.set_feature);
		}
	}

	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
		/* Enable MDS Diagnostics only if the SLI Port supports it */
		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			phba->mds_diags_support = 0;
	}

	/*
	 * Discover the port's supported feature set and match it against the
	 * hosts requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}

	/* Performance Hints are ONLY for FCoE */
	if (phba->hba_flag & HBA_FCOE_MODE) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
		else
			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	}

	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver.  This is not a fatal error.
	 */
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
			phba->cfg_enable_bg = 0;
			phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
			ftr_rsp++;
		}
	}

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/* Always try to enable dual dump feature if we can */
	lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
	if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6448 Dual Dump is enabled\n");
	else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
				"6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x dd:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(
					phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(
					phba, mboxq),
				rc, dd);
	/*
	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
	 * calls depends on these resources to complete port setup.
	 */
	rc = lpfc_sli4_alloc_resource_identifiers(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2920 Failed to alloc Resource IDs "
				"rc = x%x\n", rc);
		goto out_free_mbox;
	}

	lpfc_set_host_data(phba, mboxq);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"2134 Failed to set host os driver version %x",
				rc);
	}

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *)mboxq->ctx_buf;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->ctx_buf = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Create all the SLI4 queues */
	rc = lpfc_sli4_queue_create(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3089 Failed to allocate queues\n");
		rc = -ENODEV;
		goto out_free_mbox;
	}
	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0381 Error %d during queue setup.\n", rc);
		goto out_stop_timers;
	}
	/* Initialize the driver internal SLI layer lists. */
	lpfc_sli4_setup(phba);
	lpfc_sli4_queue_init(phba);

	/* update host els xri-sgl sizes and mappings */
	rc = lpfc_sli4_els_sgl_update(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1400 Failed to update xri-sgl size and "
				"mapping: %d\n", rc);
		goto out_destroy_queue;
	}

	/* register the els sgl pool to the port */
	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
				       phba->sli4_hba.els_xri_cnt);
	if (unlikely(rc < 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0582 Error %d during els sgl post "
				"operation\n", rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	phba->sli4_hba.els_xri_cnt = rc;

	if (phba->nvmet_support) {
		/* update host nvmet xri-sgl sizes and mappings */
		rc = lpfc_sli4_nvmet_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6308 Failed to update nvmet-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		/* register the nvmet sgl pool to the port */
		rc = lpfc_sli4_repost_sgl_list(
			phba,
			&phba->sli4_hba.lpfc_nvmet_sgl_list,
			phba->sli4_hba.nvmet_xri_cnt);
		if (unlikely(rc < 0)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"3117 Error %d during nvmet "
					"sgl post\n", rc);
			rc = -ENODEV;
			goto out_destroy_queue;
		}
		phba->sli4_hba.nvmet_xri_cnt = rc;

		/* We allocate an iocbq for every receive context SGL.
		 * The additional allocation is for abort and ls handling.
		 */
		cnt = phba->sli4_hba.nvmet_xri_cnt +
			phba->sli4_hba.max_cfg_param.max_xri;
	} else {
		/* update host common xri-sgl sizes and mappings */
		rc = lpfc_sli4_io_sgl_update(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6082 Failed to update nvme-sgl size "
					"and mapping: %d\n", rc);
			goto out_destroy_queue;
		}

		/* register the allocated common sgl pool to the port */
		rc = lpfc_sli4_repost_io_sgl_list(phba);
		if (unlikely(rc)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6116 Error %d during nvme sgl post "
					"operation\n", rc);
			/* Some NVME buffers were moved to abort nvme list */
			/* A pci function reset will repost them */
			rc = -ENODEV;
			goto out_destroy_queue;
		}
		/* Each lpfc_io_buf job structure has an iocbq element.
		 * This cnt provides for abort, els, ct and ls requests.
		 */
		cnt = phba->sli4_hba.max_cfg_param.max_xri;
	}

	if (!phba->sli.iocbq_lookup) {
		/* Initialize and populate the iocb list per host */
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2821 initialize iocb list with %d entries\n",
				cnt);
		rc = lpfc_init_iocb_list(phba, cnt);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1413 Failed to init iocb list.\n");
			goto out_destroy_queue;
		}
	}

	if (phba->nvmet_support)
		lpfc_nvmet_create_targetport(phba);

	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
		/* Post initial buffers to all RQs created */
		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
			rqbp->buffer_count = 0;

			lpfc_post_rq_buffer(
				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
				phba->sli4_hba.nvmet_mrq_data[i],
				phba->cfg_nvmet_mrq_post, i);
		}
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_destroy_queue;
	}
	lpfc_sli4_node_prep(phba);

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
			/*
			 * The FC Port needs to register FCFI (index 0)
			 */
			lpfc_reg_fcfi(phba, mboxq);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
						&mboxq->u.mqe.un.reg_fcfi);
		} else {
			/* We are a NVME Target mode with MRQ > 1 */

			/* First register the FCFI */
			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
						&mboxq->u.mqe.un.reg_fcfi_mrq);

			/* Next register the MRQs */
			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
			mboxq->vport = phba->pport;
			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
			if (rc != MBX_SUCCESS)
				goto out_unset_queue;
			rc = 0;
		}
		/* Check if the port is configured to be disabled */
		lpfc_sli_read_link_ste(phba);
	}

	/* Don't post more new bufs if repost already recovered
	 * the nvme sgls.
	 */
	if (phba->nvmet_support == 0) {
		if (phba->sli4_hba.io_xri_cnt == 0) {
			len = lpfc_new_io_buf(
					      phba, phba->sli4_hba.io_xri_max);
			if (len == 0) {
				rc = -ENOMEM;
				goto out_unset_queue;
			}

			if (phba->cfg_xri_rebalancing)
				lpfc_create_multixri_pools(phba);
		}
	} else {
		phba->cfg_xri_rebalancing = 0;
	}

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));

	/* Start heart beat timer */
8042 mod_timer(&phba
->hb_tmofunc
,
8043 jiffies
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL
));
8044 phba
->hb_outstanding
= 0;
8045 phba
->last_completion_time
= jiffies
;
8047 /* start eq_delay heartbeat */
8048 if (phba
->cfg_auto_imax
)
8049 queue_delayed_work(phba
->wq
, &phba
->eq_delay_work
,
8050 msecs_to_jiffies(LPFC_EQ_DELAY_MSECS
));
8052 /* start per phba idle_stat_delay heartbeat */
8053 lpfc_init_idle_stat_hb(phba
);
8055 /* Start error attention (ERATT) polling timer */
8056 mod_timer(&phba
->eratt_poll
,
8057 jiffies
+ msecs_to_jiffies(1000 * phba
->eratt_poll_interval
));
8059 /* Enable PCIe device Advanced Error Reporting (AER) if configured */
8060 if (phba
->cfg_aer_support
== 1 && !(phba
->hba_flag
& HBA_AER_ENABLED
)) {
8061 rc
= pci_enable_pcie_error_reporting(phba
->pcidev
);
8063 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
8064 "2829 This device supports "
8065 "Advanced Error Reporting (AER)\n");
8066 spin_lock_irq(&phba
->hbalock
);
8067 phba
->hba_flag
|= HBA_AER_ENABLED
;
8068 spin_unlock_irq(&phba
->hbalock
);
8070 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
8071 "2830 This device does not support "
8072 "Advanced Error Reporting (AER)\n");
8073 phba
->cfg_aer_support
= 0;
8079 * The port is ready, set the host's link state to LINK_DOWN
8080 * in preparation for link interrupts.
8082 spin_lock_irq(&phba
->hbalock
);
8083 phba
->link_state
= LPFC_LINK_DOWN
;
8085 /* Check if physical ports are trunked */
8086 if (bf_get(lpfc_conf_trunk_port0
, &phba
->sli4_hba
))
8087 phba
->trunk_link
.link0
.state
= LPFC_LINK_DOWN
;
8088 if (bf_get(lpfc_conf_trunk_port1
, &phba
->sli4_hba
))
8089 phba
->trunk_link
.link1
.state
= LPFC_LINK_DOWN
;
8090 if (bf_get(lpfc_conf_trunk_port2
, &phba
->sli4_hba
))
8091 phba
->trunk_link
.link2
.state
= LPFC_LINK_DOWN
;
8092 if (bf_get(lpfc_conf_trunk_port3
, &phba
->sli4_hba
))
8093 phba
->trunk_link
.link3
.state
= LPFC_LINK_DOWN
;
8094 spin_unlock_irq(&phba
->hbalock
);
8096 /* Arm the CQs and then EQs on device */
8097 lpfc_sli4_arm_cqeq_intr(phba
);
8099 /* Indicate device interrupt mode */
8100 phba
->sli4_hba
.intr_enable
= 1;
8102 if (!(phba
->hba_flag
& HBA_FCOE_MODE
) &&
8103 (phba
->hba_flag
& LINK_DISABLED
)) {
8104 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8105 "3103 Adapter Link is disabled.\n");
8106 lpfc_down_link(phba
, mboxq
);
8107 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
8108 if (rc
!= MBX_SUCCESS
) {
8109 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
8110 "3104 Adapter failed to issue "
8111 "DOWN_LINK mbox cmd, rc:x%x\n", rc
);
8112 goto out_io_buff_free
;
8114 } else if (phba
->cfg_suppress_link_up
== LPFC_INITIALIZE_LINK
) {
8115 /* don't perform init_link on SLI4 FC port loopback test */
8116 if (!(phba
->link_flag
& LS_LOOPBACK_MODE
)) {
8117 rc
= phba
->lpfc_hba_init_link(phba
, MBX_NOWAIT
);
8119 goto out_io_buff_free
;
8122 mempool_free(mboxq
, phba
->mbox_mem_pool
);
8125 /* Free allocated IO Buffers */
8128 /* Unset all the queues set up in this routine when error out */
8129 lpfc_sli4_queue_unset(phba
);
8131 lpfc_free_iocb_list(phba
);
8132 lpfc_sli4_queue_destroy(phba
);
8134 lpfc_stop_hba_timers(phba
);
8136 mempool_free(mboxq
, phba
->mbox_mem_pool
);
/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @t: Context to fetch pointer to hba structure from.
 *
 * This is the callback function for the mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox completes. The function is called by
 * the kernel timer code when a mailbox does not complete within the
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo);
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
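
/*
 * Illustrative note (not in the original source): the mbox_tmo timer
 * serviced above is armed by the mailbox issue paths further below, e.g.
 *
 *	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
 *	mod_timer(&psli->mbox_tmo, jiffies + timeout);
 *
 * so a firing timer always corresponds to a command whose per-command
 * timeout, as computed by lpfc_mbox_tmo_val(), has expired.
 */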
/**
 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
 *                                      are pending
 * @phba: Pointer to HBA context object.
 *
 * This function checks if any mailbox completions are present on the mailbox
 * completion queue.
 **/
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{

	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;
	uint8_t	qe_valid;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on mailbox completion queue */

	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	qe_valid = mcq->qe_valid;
	while (bf_get_le32(lpfc_cqe_valid,
	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		if (mcq->hba_index == idx)
			break;

		/* if the index wrapped around, toggle the valid bit */
		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
			qe_valid = (qe_valid) ? 0 : 1;
	}
	return pending_completions;
}
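
/*
 * Illustrative note (not in the original source): the scan above follows
 * the SLI4 CQE valid-bit convention - entries are examined starting at
 * mcq->hba_index, and on ports that report cqav in pc_sli4_params the
 * expected valid bit is toggled each time the index wraps, so the loop
 * stops at the first entry the port has not yet written.
 */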
/**
 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
 *                                             that were missed
 * @phba: Pointer to HBA context object.
 *
 * For sli4, it is possible to miss an interrupt. As such, mbox completions
 * may be missed, causing erroneous mailbox timeouts to occur. This function
 * checks to see if mbox completions are on the mailbox completion queue
 * and will process all the completions associated with the eq for the
 * mailbox completion queue.
 **/
static bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{
	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_queue *eq;
	bool mbox_pending;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the EQ associated with the mbox CQ */
	if (sli4_hba->hdwq) {
		for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
			if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
				fpeq = eq;
				break;
			}
		}
	}
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ */

	sli4_hba->sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending */

	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on EQ
	 * associated with the mbox completion queue (this could include
	 * mailbox commands, async events, els commands, receive queue data
	 * and fcp commands)
	 */

	if (mbox_pending)
		/* process and rearm the EQ */
		lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);
	else
		/* Always clear and re-arm the EQ */
		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);

	return mbox_pending;
}
/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from worker thread when a mailbox command times out.
 * The caller is not required to hold any locks. This function will reset the
 * HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;

	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed, process the completion and return */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox)
		mb = &pmbox->u.mb;
	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and return immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into the mailbox queue.
 * The sli layer owns the mailbox object until the completion of the mailbox
 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
		    !(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2528 Mailbox command x%x cannot "
					"issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand,
				phba->pport ? phba->pport->port_state : 0xff,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
					      LPFC_DISC_TRC_MBOX_VPORT,
					      "MBOX Bsy vport: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0],
					      mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					      LPFC_DISC_TRC_MBOX,
					      "MBOX Bsy: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0],
					      mbx->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000);
		mod_timer(&psli->mbox_tmo, jiffies + timeout);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand,
			phba->pport ? phba->pport->port_state : 0xff,
			psli->sli_flag, flag);

	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
					      LPFC_DISC_TRC_MBOX_VPORT,
					      "MBOX Send vport: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0],
					      mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					      LPFC_DISC_TRC_MBOX,
					      "MBOX Send: cmd:x%x mb:x%x x%x",
					      (uint32_t)mbx->mbxCommand,
					      mbx->un.varWords[0],
					      mbx->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf) {
			lpfc_sli_pcimem_bcopy(pmbox->ctx_buf,
					      (uint8_t *)phba->mbox_ext,
					      pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->ctx_buf)
			lpfc_memcpy_to_slim(phba->MBslimaddr +
					    MAILBOX_HBA_EXT_OFFSET,
					    pmbox->ctx_buf,
					    pmbox->in_ext_byte_len);

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
					      MAILBOX_CMD_SIZE);

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof(uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof(uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT)
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
					       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) &slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->ctx_buf,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->ctx_buf) {
				lpfc_memcpy_from_slim(
					pmbox->ctx_buf,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out the
 * possible outstanding mailbox command before return.
 *
 * Returns:
 *	0 - the outstanding mailbox command completed; otherwise, the wait for
 *	the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Make sure the mailbox is really active */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */
			rc = 1;
			break;
		}
	}

	/* Can not cleanly block async mailbox command, fail it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is an outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Outstanding synchronous mailbox command is guaranteed to be done,
	 * successful or timeout, after timing-out the outstanding mailbox
	 * command shall always be removed, so just unblock posting async
	 * mailbox command and resume
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function waits for the bootstrap mailbox register ready bit from
 * the port for the regular mailbox command timeout value.
 *
 * Returns:
 *	0 - no timeout on waiting for bootstrap mailbox register ready.
 *	MBXERR_ERROR - wait for bootstrap mailbox register timed out.
 **/
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t db_ready;
	unsigned long timeout;
	struct lpfc_register bmbx_reg;

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
				   * 1000) + jiffies;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			mdelay(2);

		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
	} while (!db_ready);

	return 0;
}
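
/*
 * Illustrative note (not in the original source): lpfc_sli4_post_sync_mbox()
 * below calls this helper once before touching the bootstrap region and once
 * after each of the two BMBX doorbell writes (high then low DMA address), so
 * every step of the bootstrap handshake waits for the port to report ready.
 */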
/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port.  The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 *	MBX_SUCCESS - mailbox posted successfully
 *	Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for bootstrap mbox register readiness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;
	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post.  Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			       sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			       sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			       sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token, no need for the lock when releasing it */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with SLI-4 interface spec.
 *
 * Return codes the caller owns the mailbox command after the return of the
 * function.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command if setup */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"(%d):2597 Sync Mailbox command "
					"x%x (x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by the worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 *
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
					      LPFC_DISC_TRC_MBOX_VPORT,
					      "MBOX Send vport: cmd:x%x mb:x%x x%x",
					      mbx_cmnd, mqe->un.mb_words[0],
					      mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					      LPFC_DISC_TRC_MBOX,
					      "MBOX Send: cmd:x%x mb:x%x x%x",
					      mbx_cmnd, mqe->un.mb_words[0],
					      mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes the caller owns the mailbox command after the return of the
 * function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
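
/*
 * Illustrative usage sketch (not part of the driver): a typical polled
 * caller follows the pattern used by the READ_SPARAM and REG_FCFI code in
 * lpfc_sli4_hba_setup() earlier in this file:
 *
 *	LPFC_MBOXQ_t *mboxq;
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	lpfc_reg_fcfi(phba, mboxq);		// build the command
 *	mboxq->vport = phba->pport;
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		goto fail;			// handle the error
 *	mempool_free(mboxq, phba->mbox_mem_pool);
 *
 * Ownership follows the lpfc_sli_issue_mbox_s3() kernel-doc above: on
 * MBX_BUSY the SLI layer still owns the mailbox until it completes; for
 * other return codes the caller owns it again after the call returns.
 */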
/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held for SLI3 ports or
 * the ring lock held for SLI4 ports to add a command
 * iocb to the txq when the SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		lockdep_assert_held(&pring->ring_lock);
	else
		lockdep_assert_held(&phba->hbalock);
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
}
/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. This function checks
 * txq to flush the iocbs in txq to Firmware before
 * submitting new iocbs to the Firmware.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL. Caller needs to check
 * *piocb to find if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	lockdep_assert_held(&phba->hbalock);

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
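
/*
 * Illustrative note (not in the original source): __lpfc_sli_issue_iocb_s3()
 * below drains the txq with this helper before accepting new work:
 *
 *	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
 *	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
 *		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
 *
 * so previously queued iocbs always reach the ring ahead of the one being
 * issued now.
 */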
/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
 * this function allows only iocbs for posting buffers. This function finds
 * the next available slot in the command ring and posts the command to the
 * available slot and writes the port attention register to request HBA start
 * processing new iocb. If there is no slot available in the ring and
 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
 * the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding to the
 * txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];

	lockdep_assert_held(&phba->hbalock);

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}


	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of a
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
			     FC_RCTL_DD_UNSOL_CMD) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Type !=
			     MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			fallthrough;
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the IOCB contains a single
 * BDE then it is converted to a single sli_sge.
 * The IOCB is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		  struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl  = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl  = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl  = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
9633 * lpfc_sli_iocb2wqe - Convert the IOCB to a work queue entry.
9634 * @phba: Pointer to HBA context object.
9635 * @iocbq: Pointer to command iocb.
9636 * @wqe: Pointer to the work queue entry.
9638 * This routine converts the iocb command to its Work Queue Entry
9639 * equivalent. The wqe pointer should not have any fields set when
9640 * this routine is called because it will memcpy over them.
9641 * This routine does not set the CQ_ID or the WQEC bits in the
9644 * Returns: 0 = Success, IOCB_ERROR = Failure.
9647 lpfc_sli4_iocb2wqe(struct lpfc_hba
*phba
, struct lpfc_iocbq
*iocbq
,
9648 union lpfc_wqe128
*wqe
)
9650 uint32_t xmit_len
= 0, total_len
= 0;
9654 uint8_t command_type
= ELS_COMMAND_NON_FIP
;
9657 uint16_t abrt_iotag
;
9658 struct lpfc_iocbq
*abrtiocbq
;
9659 struct ulp_bde64
*bpl
= NULL
;
9660 uint32_t els_id
= LPFC_ELS_ID_DEFAULT
;
9662 struct ulp_bde64 bde
;
9663 struct lpfc_nodelist
*ndlp
;
9667 fip
= phba
->hba_flag
& HBA_FIP_SUPPORT
;
9668 /* The fcp commands will set command type */
9669 if (iocbq
->iocb_flag
& LPFC_IO_FCP
)
9670 command_type
= FCP_COMMAND
;
9671 else if (fip
&& (iocbq
->iocb_flag
& LPFC_FIP_ELS_ID_MASK
))
9672 command_type
= ELS_COMMAND_FIP
;
9674 command_type
= ELS_COMMAND_NON_FIP
;
9676 if (phba
->fcp_embed_io
)
9677 memset(wqe
, 0, sizeof(union lpfc_wqe128
));
9678 /* Some of the fields are in the right position already */
9679 memcpy(wqe
, &iocbq
->iocb
, sizeof(union lpfc_wqe
));
9680 /* The ct field has moved so reset */
9681 wqe
->generic
.wqe_com
.word7
= 0;
9682 wqe
->generic
.wqe_com
.word10
= 0;
9684 abort_tag
= (uint32_t) iocbq
->iotag
;
9685 xritag
= iocbq
->sli4_xritag
;
9686 /* words0-2 bpl convert bde */
9687 if (iocbq
->iocb
.un
.genreq64
.bdl
.bdeFlags
== BUFF_TYPE_BLP_64
) {
9688 numBdes
= iocbq
->iocb
.un
.genreq64
.bdl
.bdeSize
/
9689 sizeof(struct ulp_bde64
);
9690 bpl
= (struct ulp_bde64
*)
9691 ((struct lpfc_dmabuf
*)iocbq
->context3
)->virt
;
9695 /* Should already be byte swapped. */
9696 wqe
->generic
.bde
.addrHigh
= le32_to_cpu(bpl
->addrHigh
);
9697 wqe
->generic
.bde
.addrLow
= le32_to_cpu(bpl
->addrLow
);
9698 /* swap the size field back to the cpu so we
9699 * can assign it to the sgl.
9701 wqe
->generic
.bde
.tus
.w
= le32_to_cpu(bpl
->tus
.w
);
9702 xmit_len
= wqe
->generic
.bde
.tus
.f
.bdeSize
;
9704 for (i
= 0; i
< numBdes
; i
++) {
9705 bde
.tus
.w
= le32_to_cpu(bpl
[i
].tus
.w
);
9706 total_len
+= bde
.tus
.f
.bdeSize
;
9709 xmit_len
= iocbq
->iocb
.un
.fcpi64
.bdl
.bdeSize
;
9711 iocbq
->iocb
.ulpIoTag
= iocbq
->iotag
;
9712 cmnd
= iocbq
->iocb
.ulpCommand
;
9714 switch (iocbq
->iocb
.ulpCommand
) {
9715 case CMD_ELS_REQUEST64_CR
:
9716 if (iocbq
->iocb_flag
& LPFC_IO_LIBDFC
)
9717 ndlp
= iocbq
->context_un
.ndlp
;
9719 ndlp
= (struct lpfc_nodelist
*)iocbq
->context1
;
9720 if (!iocbq
->iocb
.ulpLe
) {
9721 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
9722 "2007 Only Limited Edition cmd Format"
9723 " supported 0x%x\n",
9724 iocbq
->iocb
.ulpCommand
);
9728 wqe
->els_req
.payload_len
= xmit_len
;
9729 /* Els_reguest64 has a TMO */
9730 bf_set(wqe_tmo
, &wqe
->els_req
.wqe_com
,
9731 iocbq
->iocb
.ulpTimeout
);
9732 /* Need a VF for word 4 set the vf bit*/
9733 bf_set(els_req64_vf
, &wqe
->els_req
, 0);
9734 /* And a VFID for word 12 */
9735 bf_set(els_req64_vfid
, &wqe
->els_req
, 0);
9736 ct
= ((iocbq
->iocb
.ulpCt_h
<< 1) | iocbq
->iocb
.ulpCt_l
);
9737 bf_set(wqe_ctxt_tag
, &wqe
->els_req
.wqe_com
,
9738 iocbq
->iocb
.ulpContext
);
9739 bf_set(wqe_ct
, &wqe
->els_req
.wqe_com
, ct
);
9740 bf_set(wqe_pu
, &wqe
->els_req
.wqe_com
, 0);
9741 /* CCP CCPE PV PRI in word10 were set in the memcpy */
9742 if (command_type
== ELS_COMMAND_FIP
)
9743 els_id
= ((iocbq
->iocb_flag
& LPFC_FIP_ELS_ID_MASK
)
9744 >> LPFC_FIP_ELS_ID_SHIFT
);
9745 pcmd
= (uint32_t *) (((struct lpfc_dmabuf
*)
9746 iocbq
->context2
)->virt
);
9747 if_type
= bf_get(lpfc_sli_intf_if_type
,
9748 &phba
->sli4_hba
.sli_intf
);
9749 if (if_type
>= LPFC_SLI_INTF_IF_TYPE_2
) {
9750 if (pcmd
&& (*pcmd
== ELS_CMD_FLOGI
||
9751 *pcmd
== ELS_CMD_SCR
||
9752 *pcmd
== ELS_CMD_RDF
||
9753 *pcmd
== ELS_CMD_RSCN_XMT
||
9754 *pcmd
== ELS_CMD_FDISC
||
9755 *pcmd
== ELS_CMD_LOGO
||
9756 *pcmd
== ELS_CMD_PLOGI
)) {
9757 bf_set(els_req64_sp
, &wqe
->els_req
, 1);
9758 bf_set(els_req64_sid
, &wqe
->els_req
,
9759 iocbq
->vport
->fc_myDID
);
9760 if ((*pcmd
== ELS_CMD_FLOGI
) &&
9761 !(phba
->fc_topology
==
9762 LPFC_TOPOLOGY_LOOP
))
9763 bf_set(els_req64_sid
, &wqe
->els_req
, 0);
9764 bf_set(wqe_ct
, &wqe
->els_req
.wqe_com
, 1);
9765 bf_set(wqe_ctxt_tag
, &wqe
->els_req
.wqe_com
,
9766 phba
->vpi_ids
[iocbq
->vport
->vpi
]);
9767 } else if (pcmd
&& iocbq
->context1
) {
9768 bf_set(wqe_ct
, &wqe
->els_req
.wqe_com
, 0);
9769 bf_set(wqe_ctxt_tag
, &wqe
->els_req
.wqe_com
,
9770 phba
->sli4_hba
.rpi_ids
[ndlp
->nlp_rpi
]);
9773 bf_set(wqe_temp_rpi
, &wqe
->els_req
.wqe_com
,
9774 phba
->sli4_hba
.rpi_ids
[ndlp
->nlp_rpi
]);
9775 bf_set(wqe_els_id
, &wqe
->els_req
.wqe_com
, els_id
);
9776 bf_set(wqe_dbde
, &wqe
->els_req
.wqe_com
, 1);
9777 bf_set(wqe_iod
, &wqe
->els_req
.wqe_com
, LPFC_WQE_IOD_READ
);
9778 bf_set(wqe_qosd
, &wqe
->els_req
.wqe_com
, 1);
9779 bf_set(wqe_lenloc
, &wqe
->els_req
.wqe_com
, LPFC_WQE_LENLOC_NONE
);
9780 bf_set(wqe_ebde_cnt
, &wqe
->els_req
.wqe_com
, 0);
9781 wqe
->els_req
.max_response_payload_len
= total_len
- xmit_len
;
9783 case CMD_XMIT_SEQUENCE64_CX
:
9784 bf_set(wqe_ctxt_tag
, &wqe
->xmit_sequence
.wqe_com
,
9785 iocbq
->iocb
.un
.ulpWord
[3]);
9786 bf_set(wqe_rcvoxid
, &wqe
->xmit_sequence
.wqe_com
,
9787 iocbq
->iocb
.unsli3
.rcvsli3
.ox_id
);
9788 /* The entire sequence is transmitted for this IOCB */
9789 xmit_len
= total_len
;
9790 cmnd
= CMD_XMIT_SEQUENCE64_CR
;
9791 if (phba
->link_flag
& LS_LOOPBACK_MODE
)
9792 bf_set(wqe_xo
, &wqe
->xmit_sequence
.wge_ctl
, 1);
9794 case CMD_XMIT_SEQUENCE64_CR
:
9795 /* word3 iocb=io_tag32 wqe=reserved */
9796 wqe
->xmit_sequence
.rsvd3
= 0;
9797 /* word4 relative_offset memcpy */
9798 /* word5 r_ctl/df_ctl memcpy */
9799 bf_set(wqe_pu
, &wqe
->xmit_sequence
.wqe_com
, 0);
9800 bf_set(wqe_dbde
, &wqe
->xmit_sequence
.wqe_com
, 1);
9801 bf_set(wqe_iod
, &wqe
->xmit_sequence
.wqe_com
,
9802 LPFC_WQE_IOD_WRITE
);
9803 bf_set(wqe_lenloc
, &wqe
->xmit_sequence
.wqe_com
,
9804 LPFC_WQE_LENLOC_WORD12
);
9805 bf_set(wqe_ebde_cnt
, &wqe
->xmit_sequence
.wqe_com
, 0);
9806 wqe
->xmit_sequence
.xmit_len
= xmit_len
;
9807 command_type
= OTHER_COMMAND
;
9809 case CMD_XMIT_BCAST64_CN
:
9810 /* word3 iocb=iotag32 wqe=seq_payload_len */
9811 wqe
->xmit_bcast64
.seq_payload_len
= xmit_len
;
9812 /* word4 iocb=rsvd wqe=rsvd */
9813 /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
9814 /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
9815 bf_set(wqe_ct
, &wqe
->xmit_bcast64
.wqe_com
,
9816 ((iocbq
->iocb
.ulpCt_h
<< 1) | iocbq
->iocb
.ulpCt_l
));
9817 bf_set(wqe_dbde
, &wqe
->xmit_bcast64
.wqe_com
, 1);
9818 bf_set(wqe_iod
, &wqe
->xmit_bcast64
.wqe_com
, LPFC_WQE_IOD_WRITE
);
9819 bf_set(wqe_lenloc
, &wqe
->xmit_bcast64
.wqe_com
,
9820 LPFC_WQE_LENLOC_WORD3
);
9821 bf_set(wqe_ebde_cnt
, &wqe
->xmit_bcast64
.wqe_com
, 0);
9823 case CMD_FCP_IWRITE64_CR
:
9824 command_type
= FCP_COMMAND_DATA_OUT
;
9825 /* word3 iocb=iotag wqe=payload_offset_len */
9826 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9827 bf_set(payload_offset_len
, &wqe
->fcp_iwrite
,
9828 xmit_len
+ sizeof(struct fcp_rsp
));
9829 bf_set(cmd_buff_len
, &wqe
->fcp_iwrite
,
9831 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9832 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9833 bf_set(wqe_erp
, &wqe
->fcp_iwrite
.wqe_com
,
9834 iocbq
->iocb
.ulpFCP2Rcvy
);
9835 bf_set(wqe_lnk
, &wqe
->fcp_iwrite
.wqe_com
, iocbq
->iocb
.ulpXS
);
9836 /* Always open the exchange */
9837 bf_set(wqe_iod
, &wqe
->fcp_iwrite
.wqe_com
, LPFC_WQE_IOD_WRITE
);
9838 bf_set(wqe_lenloc
, &wqe
->fcp_iwrite
.wqe_com
,
9839 LPFC_WQE_LENLOC_WORD4
);
9840 bf_set(wqe_pu
, &wqe
->fcp_iwrite
.wqe_com
, iocbq
->iocb
.ulpPU
);
9841 bf_set(wqe_dbde
, &wqe
->fcp_iwrite
.wqe_com
, 1);
9842 if (iocbq
->iocb_flag
& LPFC_IO_OAS
) {
9843 bf_set(wqe_oas
, &wqe
->fcp_iwrite
.wqe_com
, 1);
9844 bf_set(wqe_ccpe
, &wqe
->fcp_iwrite
.wqe_com
, 1);
9845 if (iocbq
->priority
) {
9846 bf_set(wqe_ccp
, &wqe
->fcp_iwrite
.wqe_com
,
9847 (iocbq
->priority
<< 1));
9849 bf_set(wqe_ccp
, &wqe
->fcp_iwrite
.wqe_com
,
9850 (phba
->cfg_XLanePriority
<< 1));
9853 /* Note, word 10 is already initialized to 0 */
9855 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9856 if (phba
->cfg_enable_pbde
)
9857 bf_set(wqe_pbde
, &wqe
->fcp_iwrite
.wqe_com
, 1);
9859 bf_set(wqe_pbde
, &wqe
->fcp_iwrite
.wqe_com
, 0);
9861 if (phba
->fcp_embed_io
) {
9862 struct lpfc_io_buf
*lpfc_cmd
;
9863 struct sli4_sge
*sgl
;
9864 struct fcp_cmnd
*fcp_cmnd
;
9867 /* 128 byte wqe support here */
9869 lpfc_cmd
= iocbq
->context1
;
9870 sgl
= (struct sli4_sge
*)lpfc_cmd
->dma_sgl
;
9871 fcp_cmnd
= lpfc_cmd
->fcp_cmnd
;
9873 /* Word 0-2 - FCP_CMND */
9874 wqe
->generic
.bde
.tus
.f
.bdeFlags
=
9875 BUFF_TYPE_BDE_IMMED
;
9876 wqe
->generic
.bde
.tus
.f
.bdeSize
= sgl
->sge_len
;
9877 wqe
->generic
.bde
.addrHigh
= 0;
9878 wqe
->generic
.bde
.addrLow
= 88; /* Word 22 */
9880 bf_set(wqe_wqes
, &wqe
->fcp_iwrite
.wqe_com
, 1);
9881 bf_set(wqe_dbde
, &wqe
->fcp_iwrite
.wqe_com
, 0);
9883 /* Word 22-29 FCP CMND Payload */
9884 ptr
= &wqe
->words
[22];
9885 memcpy(ptr
, fcp_cmnd
, sizeof(struct fcp_cmnd
));
9888 case CMD_FCP_IREAD64_CR
:
9889 /* word3 iocb=iotag wqe=payload_offset_len */
9890 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9891 bf_set(payload_offset_len
, &wqe
->fcp_iread
,
9892 xmit_len
+ sizeof(struct fcp_rsp
));
9893 bf_set(cmd_buff_len
, &wqe
->fcp_iread
,
9895 /* word4 iocb=parameter wqe=total_xfer_length memcpy */
9896 /* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
9897 bf_set(wqe_erp
, &wqe
->fcp_iread
.wqe_com
,
9898 iocbq
->iocb
.ulpFCP2Rcvy
);
9899 bf_set(wqe_lnk
, &wqe
->fcp_iread
.wqe_com
, iocbq
->iocb
.ulpXS
);
9900 /* Always open the exchange */
9901 bf_set(wqe_iod
, &wqe
->fcp_iread
.wqe_com
, LPFC_WQE_IOD_READ
);
9902 bf_set(wqe_lenloc
, &wqe
->fcp_iread
.wqe_com
,
9903 LPFC_WQE_LENLOC_WORD4
);
9904 bf_set(wqe_pu
, &wqe
->fcp_iread
.wqe_com
, iocbq
->iocb
.ulpPU
);
9905 bf_set(wqe_dbde
, &wqe
->fcp_iread
.wqe_com
, 1);
9906 if (iocbq
->iocb_flag
& LPFC_IO_OAS
) {
9907 bf_set(wqe_oas
, &wqe
->fcp_iread
.wqe_com
, 1);
9908 bf_set(wqe_ccpe
, &wqe
->fcp_iread
.wqe_com
, 1);
9909 if (iocbq
->priority
) {
9910 bf_set(wqe_ccp
, &wqe
->fcp_iread
.wqe_com
,
9911 (iocbq
->priority
<< 1));
9913 bf_set(wqe_ccp
, &wqe
->fcp_iread
.wqe_com
,
9914 (phba
->cfg_XLanePriority
<< 1));
9917 /* Note, word 10 is already initialized to 0 */
9919 /* Don't set PBDE for Perf hints, just lpfc_enable_pbde */
9920 if (phba
->cfg_enable_pbde
)
9921 bf_set(wqe_pbde
, &wqe
->fcp_iread
.wqe_com
, 1);
9923 bf_set(wqe_pbde
, &wqe
->fcp_iread
.wqe_com
, 0);
9925 if (phba
->fcp_embed_io
) {
9926 struct lpfc_io_buf
*lpfc_cmd
;
9927 struct sli4_sge
*sgl
;
9928 struct fcp_cmnd
*fcp_cmnd
;
9931 /* 128 byte wqe support here */
9933 lpfc_cmd
= iocbq
->context1
;
9934 sgl
= (struct sli4_sge
*)lpfc_cmd
->dma_sgl
;
9935 fcp_cmnd
= lpfc_cmd
->fcp_cmnd
;
9937 /* Word 0-2 - FCP_CMND */
9938 wqe
->generic
.bde
.tus
.f
.bdeFlags
=
9939 BUFF_TYPE_BDE_IMMED
;
9940 wqe
->generic
.bde
.tus
.f
.bdeSize
= sgl
->sge_len
;
9941 wqe
->generic
.bde
.addrHigh
= 0;
9942 wqe
->generic
.bde
.addrLow
= 88; /* Word 22 */
9944 bf_set(wqe_wqes
, &wqe
->fcp_iread
.wqe_com
, 1);
9945 bf_set(wqe_dbde
, &wqe
->fcp_iread
.wqe_com
, 0);
9947 /* Word 22-29 FCP CMND Payload */
9948 ptr
= &wqe
->words
[22];
9949 memcpy(ptr
, fcp_cmnd
, sizeof(struct fcp_cmnd
));
9952 case CMD_FCP_ICMND64_CR
:
9953 /* word3 iocb=iotag wqe=payload_offset_len */
9954 /* Add the FCP_CMD and FCP_RSP sizes to get the offset */
9955 bf_set(payload_offset_len
, &wqe
->fcp_icmd
,
9956 xmit_len
+ sizeof(struct fcp_rsp
));
9957 bf_set(cmd_buff_len
, &wqe
->fcp_icmd
,
9959 /* word3 iocb=IO_TAG wqe=reserved */
9960 bf_set(wqe_pu
, &wqe
->fcp_icmd
.wqe_com
, 0);
9961 /* Always open the exchange */
9962 bf_set(wqe_dbde
, &wqe
->fcp_icmd
.wqe_com
, 1);
9963 bf_set(wqe_iod
, &wqe
->fcp_icmd
.wqe_com
, LPFC_WQE_IOD_WRITE
);
9964 bf_set(wqe_qosd
, &wqe
->fcp_icmd
.wqe_com
, 1);
9965 bf_set(wqe_lenloc
, &wqe
->fcp_icmd
.wqe_com
,
9966 LPFC_WQE_LENLOC_NONE
);
9967 bf_set(wqe_erp
, &wqe
->fcp_icmd
.wqe_com
,
9968 iocbq
->iocb
.ulpFCP2Rcvy
);
9969 if (iocbq
->iocb_flag
& LPFC_IO_OAS
) {
9970 bf_set(wqe_oas
, &wqe
->fcp_icmd
.wqe_com
, 1);
9971 bf_set(wqe_ccpe
, &wqe
->fcp_icmd
.wqe_com
, 1);
9972 if (iocbq
->priority
) {
9973 bf_set(wqe_ccp
, &wqe
->fcp_icmd
.wqe_com
,
9974 (iocbq
->priority
<< 1));
9976 bf_set(wqe_ccp
, &wqe
->fcp_icmd
.wqe_com
,
9977 (phba
->cfg_XLanePriority
<< 1));
9980 /* Note, word 10 is already initialized to 0 */
9982 if (phba
->fcp_embed_io
) {
9983 struct lpfc_io_buf
*lpfc_cmd
;
9984 struct sli4_sge
*sgl
;
9985 struct fcp_cmnd
*fcp_cmnd
;
9988 /* 128 byte wqe support here */
9990 lpfc_cmd
= iocbq
->context1
;
9991 sgl
= (struct sli4_sge
*)lpfc_cmd
->dma_sgl
;
9992 fcp_cmnd
= lpfc_cmd
->fcp_cmnd
;
9994 /* Word 0-2 - FCP_CMND */
9995 wqe
->generic
.bde
.tus
.f
.bdeFlags
=
9996 BUFF_TYPE_BDE_IMMED
;
9997 wqe
->generic
.bde
.tus
.f
.bdeSize
= sgl
->sge_len
;
9998 wqe
->generic
.bde
.addrHigh
= 0;
9999 wqe
->generic
.bde
.addrLow
= 88; /* Word 22 */
10001 bf_set(wqe_wqes
, &wqe
->fcp_icmd
.wqe_com
, 1);
10002 bf_set(wqe_dbde
, &wqe
->fcp_icmd
.wqe_com
, 0);
10004 /* Word 22-29 FCP CMND Payload */
10005 ptr
= &wqe
->words
[22];
10006 memcpy(ptr
, fcp_cmnd
, sizeof(struct fcp_cmnd
));
10009 case CMD_GEN_REQUEST64_CR
:
10010 /* For this command calculate the xmit length of the
10014 numBdes
= iocbq
->iocb
.un
.genreq64
.bdl
.bdeSize
/
10015 sizeof(struct ulp_bde64
);
10016 for (i
= 0; i
< numBdes
; i
++) {
10017 bde
.tus
.w
= le32_to_cpu(bpl
[i
].tus
.w
);
10018 if (bde
.tus
.f
.bdeFlags
!= BUFF_TYPE_BDE_64
)
10020 xmit_len
+= bde
.tus
.f
.bdeSize
;
10022 /* word3 iocb=IO_TAG wqe=request_payload_len */
10023 wqe
->gen_req
.request_payload_len
= xmit_len
;
10024 /* word4 iocb=parameter wqe=relative_offset memcpy */
10025 /* word5 [rctl, type, df_ctl, la] copied in memcpy */
10026 /* word6 context tag copied in memcpy */
10027 if (iocbq
->iocb
.ulpCt_h
|| iocbq
->iocb
.ulpCt_l
) {
10028 ct
= ((iocbq
->iocb
.ulpCt_h
<< 1) | iocbq
->iocb
.ulpCt_l
);
10029 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
10030 "2015 Invalid CT %x command 0x%x\n",
10031 ct
, iocbq
->iocb
.ulpCommand
);
10034 bf_set(wqe_ct
, &wqe
->gen_req
.wqe_com
, 0);
10035 bf_set(wqe_tmo
, &wqe
->gen_req
.wqe_com
, iocbq
->iocb
.ulpTimeout
);
10036 bf_set(wqe_pu
, &wqe
->gen_req
.wqe_com
, iocbq
->iocb
.ulpPU
);
10037 bf_set(wqe_dbde
, &wqe
->gen_req
.wqe_com
, 1);
10038 bf_set(wqe_iod
, &wqe
->gen_req
.wqe_com
, LPFC_WQE_IOD_READ
);
10039 bf_set(wqe_qosd
, &wqe
->gen_req
.wqe_com
, 1);
10040 bf_set(wqe_lenloc
, &wqe
->gen_req
.wqe_com
, LPFC_WQE_LENLOC_NONE
);
10041 bf_set(wqe_ebde_cnt
, &wqe
->gen_req
.wqe_com
, 0);
10042 wqe
->gen_req
.max_response_payload_len
= total_len
- xmit_len
;
10043 command_type
= OTHER_COMMAND
;
10045 case CMD_XMIT_ELS_RSP64_CX
:
10046 ndlp
= (struct lpfc_nodelist
*)iocbq
->context1
;
10047 /* words0-2 BDE memcpy */
10048 /* word3 iocb=iotag32 wqe=response_payload_len */
10049 wqe
->xmit_els_rsp
.response_payload_len
= xmit_len
;
10051 wqe
->xmit_els_rsp
.word4
= 0;
10052 /* word5 iocb=rsvd wge=did */
10053 bf_set(wqe_els_did
, &wqe
->xmit_els_rsp
.wqe_dest
,
10054 iocbq
->iocb
.un
.xseq64
.xmit_els_remoteID
);
10056 if_type
= bf_get(lpfc_sli_intf_if_type
,
10057 &phba
->sli4_hba
.sli_intf
);
10058 if (if_type
>= LPFC_SLI_INTF_IF_TYPE_2
) {
10059 if (iocbq
->vport
->fc_flag
& FC_PT2PT
) {
10060 bf_set(els_rsp64_sp
, &wqe
->xmit_els_rsp
, 1);
10061 bf_set(els_rsp64_sid
, &wqe
->xmit_els_rsp
,
10062 iocbq
->vport
->fc_myDID
);
10063 if (iocbq
->vport
->fc_myDID
== Fabric_DID
) {
10064 bf_set(wqe_els_did
,
10065 &wqe
->xmit_els_rsp
.wqe_dest
, 0);
10069 bf_set(wqe_ct
, &wqe
->xmit_els_rsp
.wqe_com
,
10070 ((iocbq
->iocb
.ulpCt_h
<< 1) | iocbq
->iocb
.ulpCt_l
));
10071 bf_set(wqe_pu
, &wqe
->xmit_els_rsp
.wqe_com
, iocbq
->iocb
.ulpPU
);
10072 bf_set(wqe_rcvoxid
, &wqe
->xmit_els_rsp
.wqe_com
,
10073 iocbq
->iocb
.unsli3
.rcvsli3
.ox_id
);
10074 if (!iocbq
->iocb
.ulpCt_h
&& iocbq
->iocb
.ulpCt_l
)
10075 bf_set(wqe_ctxt_tag
, &wqe
->xmit_els_rsp
.wqe_com
,
10076 phba
->vpi_ids
[iocbq
->vport
->vpi
]);
10077 bf_set(wqe_dbde
, &wqe
->xmit_els_rsp
.wqe_com
, 1);
10078 bf_set(wqe_iod
, &wqe
->xmit_els_rsp
.wqe_com
, LPFC_WQE_IOD_WRITE
);
10079 bf_set(wqe_qosd
, &wqe
->xmit_els_rsp
.wqe_com
, 1);
10080 bf_set(wqe_lenloc
, &wqe
->xmit_els_rsp
.wqe_com
,
10081 LPFC_WQE_LENLOC_WORD3
);
10082 bf_set(wqe_ebde_cnt
, &wqe
->xmit_els_rsp
.wqe_com
, 0);
10083 bf_set(wqe_rsp_temp_rpi
, &wqe
->xmit_els_rsp
,
10084 phba
->sli4_hba
.rpi_ids
[ndlp
->nlp_rpi
]);
10085 pcmd
= (uint32_t *) (((struct lpfc_dmabuf
*)
10086 iocbq
->context2
)->virt
);
10087 if (phba
->fc_topology
== LPFC_TOPOLOGY_LOOP
) {
10088 bf_set(els_rsp64_sp
, &wqe
->xmit_els_rsp
, 1);
10089 bf_set(els_rsp64_sid
, &wqe
->xmit_els_rsp
,
10090 iocbq
->vport
->fc_myDID
);
10091 bf_set(wqe_ct
, &wqe
->xmit_els_rsp
.wqe_com
, 1);
10092 bf_set(wqe_ctxt_tag
, &wqe
->xmit_els_rsp
.wqe_com
,
10093 phba
->vpi_ids
[phba
->pport
->vpi
]);
10095 command_type
= OTHER_COMMAND
;
10097 case CMD_CLOSE_XRI_CN
:
10098 case CMD_ABORT_XRI_CN
:
10099 case CMD_ABORT_XRI_CX
:
10100 /* words 0-2 memcpy should be 0 rserved */
10101 /* port will send abts */
10102 abrt_iotag
= iocbq
->iocb
.un
.acxri
.abortContextTag
;
10103 if (abrt_iotag
!= 0 && abrt_iotag
<= phba
->sli
.last_iotag
) {
10104 abrtiocbq
= phba
->sli
.iocbq_lookup
[abrt_iotag
];
10105 fip
= abrtiocbq
->iocb_flag
& LPFC_FIP_ELS_ID_MASK
;
10109 if ((iocbq
->iocb
.ulpCommand
== CMD_CLOSE_XRI_CN
) || fip
)
10111 * The link is down, or the command was ELS_FIP
10112 * so the fw does not need to send abts
10115 bf_set(abort_cmd_ia
, &wqe
->abort_cmd
, 1);
10117 bf_set(abort_cmd_ia
, &wqe
->abort_cmd
, 0);
10118 bf_set(abort_cmd_criteria
, &wqe
->abort_cmd
, T_XRI_TAG
);
10119 /* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
10120 wqe
->abort_cmd
.rsrvd5
= 0;
10121 bf_set(wqe_ct
, &wqe
->abort_cmd
.wqe_com
,
10122 ((iocbq
->iocb
.ulpCt_h
<< 1) | iocbq
->iocb
.ulpCt_l
));
10123 abort_tag
= iocbq
->iocb
.un
.acxri
.abortIoTag
;
10125 * The abort handler will send us CMD_ABORT_XRI_CN or
10126 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
10128 bf_set(wqe_cmnd
, &wqe
->abort_cmd
.wqe_com
, CMD_ABORT_XRI_CX
);
10129 bf_set(wqe_qosd
, &wqe
->abort_cmd
.wqe_com
, 1);
10130 bf_set(wqe_lenloc
, &wqe
->abort_cmd
.wqe_com
,
10131 LPFC_WQE_LENLOC_NONE
);
10132 cmnd
= CMD_ABORT_XRI_CX
;
10133 command_type
= OTHER_COMMAND
;
10136 case CMD_XMIT_BLS_RSP64_CX
:
10137 ndlp
= (struct lpfc_nodelist
*)iocbq
->context1
;
10138 /* As BLS ABTS RSP WQE is very different from other WQEs,
10139 * we re-construct this WQE here based on information in
10140 * iocbq from scratch.
10142 memset(wqe
, 0, sizeof(*wqe
));
10143 /* OX_ID is invariable to who sent ABTS to CT exchange */
10144 bf_set(xmit_bls_rsp64_oxid
, &wqe
->xmit_bls_rsp
,
10145 bf_get(lpfc_abts_oxid
, &iocbq
->iocb
.un
.bls_rsp
));
10146 if (bf_get(lpfc_abts_orig
, &iocbq
->iocb
.un
.bls_rsp
) ==
10147 LPFC_ABTS_UNSOL_INT
) {
10148 /* ABTS sent by initiator to CT exchange, the
10149 * RX_ID field will be filled with the newly
10150 * allocated responder XRI.
10152 bf_set(xmit_bls_rsp64_rxid
, &wqe
->xmit_bls_rsp
,
10153 iocbq
->sli4_xritag
);
10155 /* ABTS sent by responder to CT exchange, the
10156 * RX_ID field will be filled with the responder
10159 bf_set(xmit_bls_rsp64_rxid
, &wqe
->xmit_bls_rsp
,
10160 bf_get(lpfc_abts_rxid
, &iocbq
->iocb
.un
.bls_rsp
));
10162 bf_set(xmit_bls_rsp64_seqcnthi
, &wqe
->xmit_bls_rsp
, 0xffff);
10163 bf_set(wqe_xmit_bls_pt
, &wqe
->xmit_bls_rsp
.wqe_dest
, 0x1);
10166 bf_set(wqe_els_did
, &wqe
->xmit_bls_rsp
.wqe_dest
,
10168 bf_set(xmit_bls_rsp64_temprpi
, &wqe
->xmit_bls_rsp
,
10169 iocbq
->iocb
.ulpContext
);
10170 bf_set(wqe_ct
, &wqe
->xmit_bls_rsp
.wqe_com
, 1);
10171 bf_set(wqe_ctxt_tag
, &wqe
->xmit_bls_rsp
.wqe_com
,
10172 phba
->vpi_ids
[phba
->pport
->vpi
]);
10173 bf_set(wqe_qosd
, &wqe
->xmit_bls_rsp
.wqe_com
, 1);
10174 bf_set(wqe_lenloc
, &wqe
->xmit_bls_rsp
.wqe_com
,
10175 LPFC_WQE_LENLOC_NONE
);
10176 /* Overwrite the pre-set comnd type with OTHER_COMMAND */
10177 command_type
= OTHER_COMMAND
;
10178 if (iocbq
->iocb
.un
.xseq64
.w5
.hcsw
.Rctl
== FC_RCTL_BA_RJT
) {
10179 bf_set(xmit_bls_rsp64_rjt_vspec
, &wqe
->xmit_bls_rsp
,
10180 bf_get(lpfc_vndr_code
, &iocbq
->iocb
.un
.bls_rsp
));
10181 bf_set(xmit_bls_rsp64_rjt_expc
, &wqe
->xmit_bls_rsp
,
10182 bf_get(lpfc_rsn_expln
, &iocbq
->iocb
.un
.bls_rsp
));
10183 bf_set(xmit_bls_rsp64_rjt_rsnc
, &wqe
->xmit_bls_rsp
,
10184 bf_get(lpfc_rsn_code
, &iocbq
->iocb
.un
.bls_rsp
));
		}
		break;
	case CMD_SEND_FRAME:
		bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME);
		bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */
		bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */
		bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1);
		bf_set(wqe_xbl, &wqe->generic.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_xc, &wqe->generic.wqe_com, 1);
		bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA);
		bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
		bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
		return 0;
	case CMD_XRI_ABORTED_CX:
	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2014 Invalid command 0x%x\n",
				iocbq->iocb.ulpCommand);
		return IOCB_ERROR;
	}

	if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
	iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
			      LPFC_IO_DIF_INSERT);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	return 0;
}
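
/*
 * Editorial note, illustrative only (not part of the driver): the conversion
 * above is built entirely on the bf_set()/bf_get() bitfield accessors for
 * the SLI4 WQE layout.  A hypothetical sanity check of the common fields
 * written for every command could look like this sketch (the helper name
 * lpfc_check_wqe_common is invented for illustration):
 *
 *	static void lpfc_check_wqe_common(union lpfc_wqe128 *wqe,
 *					  struct lpfc_iocbq *iocbq)
 *	{
 *		WARN_ON(bf_get(wqe_reqtag, &wqe->generic.wqe_com) !=
 *			iocbq->iotag);
 *		WARN_ON(bf_get(wqe_cqid, &wqe->generic.wqe_com) !=
 *			LPFC_WQE_CQ_ID_DEFAULT);
 *	}
 */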

/**
 * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
 * version to send an iocb command to an HBA with SLI-3 interface spec.
 *
 * This function takes the hbalock before invoking the lockless version.
 * The function will return success after it successfully submits the iocb
 * to firmware or after adding it to the txq.
 **/
static int
__lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
			   struct lpfc_iocbq *piocb, uint32_t flag)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return rc;
}

/**
 * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue wqe on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to
 * issue a wqe command to an HBA with SLI-4 interface spec.
 *
 * This function is a lockless version. The function will return success
 * after it successfully submits the wqe to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
			   struct lpfc_iocbq *piocb, uint32_t flag)
{
	int rc;
	struct lpfc_io_buf *lpfc_cmd =
		(struct lpfc_io_buf *)piocb->context1;
	union lpfc_wqe128 *wqe = &piocb->wqe;
	struct sli4_sge *sgl;

	/* 128 byte wqe support here */
	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;

	if (phba->fcp_embed_io) {
		struct fcp_cmnd *fcp_cmnd;
		u32 *ptr;

		fcp_cmnd = lpfc_cmd->fcp_cmnd;

		/* Word 0-2 - FCP_CMND */
		wqe->generic.bde.tus.f.bdeFlags =
			BUFF_TYPE_BDE_IMMED;
		wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
		wqe->generic.bde.addrHigh = 0;
		wqe->generic.bde.addrLow = 88;	/* Word 22 */

		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);

		/* Word 22-29  FCP CMND Payload */
		ptr = &wqe->words[22];
		memcpy(ptr, fcp_cmnd, sizeof(struct fcp_cmnd));
	} else {
		/* Word 0-2 - Inline BDE */
		wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->generic.bde.tus.f.bdeSize = sizeof(struct fcp_cmnd);
		wqe->generic.bde.addrHigh = sgl->addr_hi;
		wqe->generic.bde.addrLow = sgl->addr_lo;

		/* Word 10 */
		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
	}

	rc = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
	return rc;
}
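
/*
 * Editorial note, illustrative only: when fcp_embed_io is set, the FCP_CMND
 * payload is placed in WQE words 22-29, so the immediate BDE's addrLow is
 * the byte offset of word 22 inside the 128-byte WQE:
 *
 *	22 words * 4 bytes per word = 88 bytes
 *
 * which is why addrLow is hard-coded to 88 above.  A hypothetical
 * compile-time check of that assumption could be:
 *
 *	BUILD_BUG_ON(offsetof(union lpfc_wqe128, words[22]) != 88);
 */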

/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with ringlock held. The function will return
 * success after it successfully submits the iocb to firmware or after
 * adding it to the txq.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe128 wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring;

	/* Get the WQ */
	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
	} else {
		wq = phba->sli4_hba.els_wq;
	}

	/* Get corresponding ring */
	pring = wq->pring;

	/*
	 * The WQE can be either 64 or 128 bytes,
	 */

	lockdep_assert_held(&pring->ring_lock);

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_els_sglq(phba, piocb);
				if (!sglq) {
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP) {
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	} else {
		/*
		 * This is a continuation of a command (CX) so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if (lpfc_sli4_wq_put(wq, &wqe))
		return IOCB_ERROR;
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}

/**
 * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue WQE/IOCB on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * This routine wraps the FCP I/O function pointer from the lpfc_hba
 * struct, issuing a WQE on an SLI-4 port or an IOCB on an SLI-3 port.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
}

/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * This routine wraps the lockless IOCB-issuing function pointer from the
 * lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}

/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
	return 0;
}
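
/*
 * Editorial note, illustrative only: the jump table set up above is what
 * lets the rest of the driver issue I/O without caring about the SLI
 * revision.  A sketch of attach-time use (assuming the PCI probe path has
 * already derived the device group, and with ring/piocb standing in for a
 * prepared request) might be:
 *
 *	if (lpfc_sli_api_table_setup(phba, phba->pci_dev_grp))
 *		return -ENODEV;
 *	...
 *	rc = phba->__lpfc_sli_issue_fcp_io(phba, ring, piocb, 0);
 */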

/**
 * lpfc_sli4_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @piocb: Pointer to command iocb.
 *
 * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
 * hba_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's hba_wqidx.
 */
struct lpfc_sli_ring *
lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
	struct lpfc_io_buf *lpfc_cmd;

	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (unlikely(!phba->sli4_hba.hdwq))
			return NULL;
		/*
		 * for abort iocb hba_wqidx should already
		 * be setup based on what work queue we used.
		 */
		if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
			lpfc_cmd = (struct lpfc_io_buf *)piocb->context1;
			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
		}
		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return NULL;
		piocb->hba_wqidx = 0;
		return phba->sli4_hba.els_wq->pring;
	}
}

/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
 * function. This function gets the appropriate lock and calls
 * __lpfc_sli_issue_iocb, returning whatever error __lpfc_sli_issue_iocb
 * returns. This wrapper is used by functions which do not hold
 * any ring lock or hbalock.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *eq;
	unsigned long iflags;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;

		pring = lpfc_sli4_calc_ring(phba, piocb);
		if (unlikely(pring == NULL))
			return IOCB_ERROR;

		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
	} else {
		/* For now, SLI2/3 will still use hbalock */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}
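
/*
 * Editorial note, illustrative only: a caller that holds no ring lock
 * submits through this wrapper and handles the busy/error cases itself,
 * for example (elsiocb is a hypothetical, already-prepared ELS iocbq):
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb,
 *				 SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_ERROR || rc == IOCB_BUSY)
 *		lpfc_els_free_iocb(phba, elsiocb);
 */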
10569 * lpfc_extra_ring_setup - Extra ring setup function
10570 * @phba: Pointer to HBA context object.
10572 * This function is called while driver attaches with the
10573 * HBA to setup the extra ring. The extra ring is used
10574 * only when driver needs to support target mode functionality
10575 * or IP over FC functionalities.
10577 * This function is called with no lock held. SLI3 only.
10580 lpfc_extra_ring_setup( struct lpfc_hba
*phba
)
10582 struct lpfc_sli
*psli
;
10583 struct lpfc_sli_ring
*pring
;
10587 /* Adjust cmd/rsp ring iocb entries more evenly */
10589 /* Take some away from the FCP ring */
10590 pring
= &psli
->sli3_ring
[LPFC_FCP_RING
];
10591 pring
->sli
.sli3
.numCiocb
-= SLI2_IOCB_CMD_R1XTRA_ENTRIES
;
10592 pring
->sli
.sli3
.numRiocb
-= SLI2_IOCB_RSP_R1XTRA_ENTRIES
;
10593 pring
->sli
.sli3
.numCiocb
-= SLI2_IOCB_CMD_R3XTRA_ENTRIES
;
10594 pring
->sli
.sli3
.numRiocb
-= SLI2_IOCB_RSP_R3XTRA_ENTRIES
;
10596 /* and give them to the extra ring */
10597 pring
= &psli
->sli3_ring
[LPFC_EXTRA_RING
];
10599 pring
->sli
.sli3
.numCiocb
+= SLI2_IOCB_CMD_R1XTRA_ENTRIES
;
10600 pring
->sli
.sli3
.numRiocb
+= SLI2_IOCB_RSP_R1XTRA_ENTRIES
;
10601 pring
->sli
.sli3
.numCiocb
+= SLI2_IOCB_CMD_R3XTRA_ENTRIES
;
10602 pring
->sli
.sli3
.numRiocb
+= SLI2_IOCB_RSP_R3XTRA_ENTRIES
;
10604 /* Setup default profile for this ring */
10605 pring
->iotag_max
= 4096;
10606 pring
->num_mask
= 1;
10607 pring
->prt
[0].profile
= 0; /* Mask 0 */
10608 pring
->prt
[0].rctl
= phba
->cfg_multi_ring_rctl
;
10609 pring
->prt
[0].type
= phba
->cfg_multi_ring_type
;
10610 pring
->prt
[0].lpfc_sli_rcv_unsol_event
= NULL
;
10615 lpfc_sli_post_recovery_event(struct lpfc_hba
*phba
,
10616 struct lpfc_nodelist
*ndlp
)
10618 unsigned long iflags
;
10619 struct lpfc_work_evt
*evtp
= &ndlp
->recovery_evt
;
10621 spin_lock_irqsave(&phba
->hbalock
, iflags
);
10622 if (!list_empty(&evtp
->evt_listp
)) {
10623 spin_unlock_irqrestore(&phba
->hbalock
, iflags
);
10627 /* Incrementing the reference count until the queued work is done. */
10628 evtp
->evt_arg1
= lpfc_nlp_get(ndlp
);
10629 if (!evtp
->evt_arg1
) {
10630 spin_unlock_irqrestore(&phba
->hbalock
, iflags
);
10633 evtp
->evt
= LPFC_EVT_RECOVER_PORT
;
10634 list_add_tail(&evtp
->evt_listp
, &phba
->work_list
);
10635 spin_unlock_irqrestore(&phba
->hbalock
, iflags
);
10637 lpfc_worker_wake_up(phba
);
10640 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
10641 * @phba: Pointer to HBA context object.
10642 * @iocbq: Pointer to iocb object.
10644 * The async_event handler calls this routine when it receives
10645 * an ASYNC_STATUS_CN event from the port. The port generates
10646 * this event when an Abort Sequence request to an rport fails
10647 * twice in succession. The abort could be originated by the
10648 * driver or by the port. The ABTS could have been for an ELS
10649 * or FCP IO. The port only generates this event when an ABTS
10650 * fails to complete after one retry.
10653 lpfc_sli_abts_err_handler(struct lpfc_hba
*phba
,
10654 struct lpfc_iocbq
*iocbq
)
10656 struct lpfc_nodelist
*ndlp
= NULL
;
10657 uint16_t rpi
= 0, vpi
= 0;
10658 struct lpfc_vport
*vport
= NULL
;
10660 /* The rpi in the ulpContext is vport-sensitive. */
10661 vpi
= iocbq
->iocb
.un
.asyncstat
.sub_ctxt_tag
;
10662 rpi
= iocbq
->iocb
.ulpContext
;
10664 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
10665 "3092 Port generated ABTS async event "
10666 "on vpi %d rpi %d status 0x%x\n",
10667 vpi
, rpi
, iocbq
->iocb
.ulpStatus
);
10669 vport
= lpfc_find_vport_by_vpid(phba
, vpi
);
10672 ndlp
= lpfc_findnode_rpi(vport
, rpi
);
10676 if (iocbq
->iocb
.ulpStatus
== IOSTAT_LOCAL_REJECT
)
10677 lpfc_sli_abts_recover_port(vport
, ndlp
);
10681 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
10682 "3095 Event Context not found, no "
10683 "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
10684 iocbq
->iocb
.ulpContext
, iocbq
->iocb
.ulpStatus
,
10688 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
10689 * @phba: pointer to HBA context object.
10690 * @ndlp: nodelist pointer for the impacted rport.
10691 * @axri: pointer to the wcqe containing the failed exchange.
10693 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
10694 * port. The port generates this event when an abort exchange request to an
10695 * rport fails twice in succession with no reply. The abort could be originated
10696 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
10699 lpfc_sli4_abts_err_handler(struct lpfc_hba
*phba
,
10700 struct lpfc_nodelist
*ndlp
,
10701 struct sli4_wcqe_xri_aborted
*axri
)
10703 uint32_t ext_status
= 0;
10706 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
10707 "3115 Node Context not found, driver "
10708 "ignoring abts err event\n");
10712 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
10713 "3116 Port generated FCP XRI ABORT event on "
10714 "vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
10715 ndlp
->vport
->vpi
, phba
->sli4_hba
.rpi_ids
[ndlp
->nlp_rpi
],
10716 bf_get(lpfc_wcqe_xa_xri
, axri
),
10717 bf_get(lpfc_wcqe_xa_status
, axri
),
10721 * Catch the ABTS protocol failure case. Older OCe FW releases returned
10722 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
10723 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
10725 ext_status
= axri
->parameter
& IOERR_PARAM_MASK
;
10726 if ((bf_get(lpfc_wcqe_xa_status
, axri
) == IOSTAT_LOCAL_REJECT
) &&
10727 ((ext_status
== IOERR_SEQUENCE_TIMEOUT
) || (ext_status
== 0)))
10728 lpfc_sli_post_recovery_event(phba
, ndlp
);
10732 * lpfc_sli_async_event_handler - ASYNC iocb handler function
10733 * @phba: Pointer to HBA context object.
10734 * @pring: Pointer to driver SLI ring object.
10735 * @iocbq: Pointer to iocb object.
10737 * This function is called by the slow ring event handler
10738 * function when there is an ASYNC event iocb in the ring.
10739 * This function is called with no lock held.
10740 * Currently this function handles only temperature related
10741 * ASYNC events. The function decodes the temperature sensor
10742 * event message and posts events for the management applications.
10745 lpfc_sli_async_event_handler(struct lpfc_hba
* phba
,
10746 struct lpfc_sli_ring
* pring
, struct lpfc_iocbq
* iocbq
)
10750 struct temp_event temp_event_data
;
10751 struct Scsi_Host
*shost
;
10754 icmd
= &iocbq
->iocb
;
10755 evt_code
= icmd
->un
.asyncstat
.evt_code
;
10757 switch (evt_code
) {
10758 case ASYNC_TEMP_WARN
:
10759 case ASYNC_TEMP_SAFE
:
10760 temp_event_data
.data
= (uint32_t) icmd
->ulpContext
;
10761 temp_event_data
.event_type
= FC_REG_TEMPERATURE_EVENT
;
10762 if (evt_code
== ASYNC_TEMP_WARN
) {
10763 temp_event_data
.event_code
= LPFC_THRESHOLD_TEMP
;
10764 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
10765 "0347 Adapter is very hot, please take "
10766 "corrective action. temperature : %d Celsius\n",
10767 (uint32_t) icmd
->ulpContext
);
10769 temp_event_data
.event_code
= LPFC_NORMAL_TEMP
;
10770 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
10771 "0340 Adapter temperature is OK now. "
10772 "temperature : %d Celsius\n",
10773 (uint32_t) icmd
->ulpContext
);
10776 /* Send temperature change event to applications */
10777 shost
= lpfc_shost_from_vport(phba
->pport
);
10778 fc_host_post_vendor_event(shost
, fc_get_event_number(),
10779 sizeof(temp_event_data
), (char *) &temp_event_data
,
10780 LPFC_NL_VENDOR_ID
);
10782 case ASYNC_STATUS_CN
:
10783 lpfc_sli_abts_err_handler(phba
, iocbq
);
10786 iocb_w
= (uint32_t *) icmd
;
10787 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
10788 "0346 Ring %d handler: unexpected ASYNC_STATUS"
10790 "W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
10791 "W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
10792 "W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
10793 "W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
10794 pring
->ringno
, icmd
->un
.asyncstat
.evt_code
,
10795 iocb_w
[0], iocb_w
[1], iocb_w
[2], iocb_w
[3],
10796 iocb_w
[4], iocb_w
[5], iocb_w
[6], iocb_w
[7],
10797 iocb_w
[8], iocb_w
[9], iocb_w
[10], iocb_w
[11],
10798 iocb_w
[12], iocb_w
[13], iocb_w
[14], iocb_w
[15]);
10806 * lpfc_sli4_setup - SLI ring setup function
10807 * @phba: Pointer to HBA context object.
10809 * lpfc_sli_setup sets up rings of the SLI interface with
10810 * number of iocbs per ring and iotags. This function is
10811 * called while driver attach to the HBA and before the
10812 * interrupts are enabled. So there is no need for locking.
10814 * This function always returns 0.
10817 lpfc_sli4_setup(struct lpfc_hba
*phba
)
10819 struct lpfc_sli_ring
*pring
;
10821 pring
= phba
->sli4_hba
.els_wq
->pring
;
10822 pring
->num_mask
= LPFC_MAX_RING_MASK
;
10823 pring
->prt
[0].profile
= 0; /* Mask 0 */
10824 pring
->prt
[0].rctl
= FC_RCTL_ELS_REQ
;
10825 pring
->prt
[0].type
= FC_TYPE_ELS
;
10826 pring
->prt
[0].lpfc_sli_rcv_unsol_event
=
10827 lpfc_els_unsol_event
;
10828 pring
->prt
[1].profile
= 0; /* Mask 1 */
10829 pring
->prt
[1].rctl
= FC_RCTL_ELS_REP
;
10830 pring
->prt
[1].type
= FC_TYPE_ELS
;
10831 pring
->prt
[1].lpfc_sli_rcv_unsol_event
=
10832 lpfc_els_unsol_event
;
10833 pring
->prt
[2].profile
= 0; /* Mask 2 */
10834 /* NameServer Inquiry */
10835 pring
->prt
[2].rctl
= FC_RCTL_DD_UNSOL_CTL
;
10837 pring
->prt
[2].type
= FC_TYPE_CT
;
10838 pring
->prt
[2].lpfc_sli_rcv_unsol_event
=
10839 lpfc_ct_unsol_event
;
10840 pring
->prt
[3].profile
= 0; /* Mask 3 */
10841 /* NameServer response */
10842 pring
->prt
[3].rctl
= FC_RCTL_DD_SOL_CTL
;
10844 pring
->prt
[3].type
= FC_TYPE_CT
;
10845 pring
->prt
[3].lpfc_sli_rcv_unsol_event
=
10846 lpfc_ct_unsol_event
;
10851 * lpfc_sli_setup - SLI ring setup function
10852 * @phba: Pointer to HBA context object.
10854 * lpfc_sli_setup sets up rings of the SLI interface with
10855 * number of iocbs per ring and iotags. This function is
10856 * called while driver attach to the HBA and before the
10857 * interrupts are enabled. So there is no need for locking.
10859 * This function always returns 0. SLI3 only.
10862 lpfc_sli_setup(struct lpfc_hba
*phba
)
10864 int i
, totiocbsize
= 0;
10865 struct lpfc_sli
*psli
= &phba
->sli
;
10866 struct lpfc_sli_ring
*pring
;
10868 psli
->num_rings
= MAX_SLI3_CONFIGURED_RINGS
;
10869 psli
->sli_flag
= 0;
10871 psli
->iocbq_lookup
= NULL
;
10872 psli
->iocbq_lookup_len
= 0;
10873 psli
->last_iotag
= 0;
10875 for (i
= 0; i
< psli
->num_rings
; i
++) {
10876 pring
= &psli
->sli3_ring
[i
];
10878 case LPFC_FCP_RING
: /* ring 0 - FCP */
10879 /* numCiocb and numRiocb are used in config_port */
10880 pring
->sli
.sli3
.numCiocb
= SLI2_IOCB_CMD_R0_ENTRIES
;
10881 pring
->sli
.sli3
.numRiocb
= SLI2_IOCB_RSP_R0_ENTRIES
;
10882 pring
->sli
.sli3
.numCiocb
+=
10883 SLI2_IOCB_CMD_R1XTRA_ENTRIES
;
10884 pring
->sli
.sli3
.numRiocb
+=
10885 SLI2_IOCB_RSP_R1XTRA_ENTRIES
;
10886 pring
->sli
.sli3
.numCiocb
+=
10887 SLI2_IOCB_CMD_R3XTRA_ENTRIES
;
10888 pring
->sli
.sli3
.numRiocb
+=
10889 SLI2_IOCB_RSP_R3XTRA_ENTRIES
;
10890 pring
->sli
.sli3
.sizeCiocb
= (phba
->sli_rev
== 3) ?
10891 SLI3_IOCB_CMD_SIZE
:
10892 SLI2_IOCB_CMD_SIZE
;
10893 pring
->sli
.sli3
.sizeRiocb
= (phba
->sli_rev
== 3) ?
10894 SLI3_IOCB_RSP_SIZE
:
10895 SLI2_IOCB_RSP_SIZE
;
10896 pring
->iotag_ctr
= 0;
10898 (phba
->cfg_hba_queue_depth
* 2);
10899 pring
->fast_iotag
= pring
->iotag_max
;
10900 pring
->num_mask
= 0;
10902 case LPFC_EXTRA_RING
: /* ring 1 - EXTRA */
10903 /* numCiocb and numRiocb are used in config_port */
10904 pring
->sli
.sli3
.numCiocb
= SLI2_IOCB_CMD_R1_ENTRIES
;
10905 pring
->sli
.sli3
.numRiocb
= SLI2_IOCB_RSP_R1_ENTRIES
;
10906 pring
->sli
.sli3
.sizeCiocb
= (phba
->sli_rev
== 3) ?
10907 SLI3_IOCB_CMD_SIZE
:
10908 SLI2_IOCB_CMD_SIZE
;
10909 pring
->sli
.sli3
.sizeRiocb
= (phba
->sli_rev
== 3) ?
10910 SLI3_IOCB_RSP_SIZE
:
10911 SLI2_IOCB_RSP_SIZE
;
10912 pring
->iotag_max
= phba
->cfg_hba_queue_depth
;
10913 pring
->num_mask
= 0;
10915 case LPFC_ELS_RING
: /* ring 2 - ELS / CT */
10916 /* numCiocb and numRiocb are used in config_port */
10917 pring
->sli
.sli3
.numCiocb
= SLI2_IOCB_CMD_R2_ENTRIES
;
10918 pring
->sli
.sli3
.numRiocb
= SLI2_IOCB_RSP_R2_ENTRIES
;
10919 pring
->sli
.sli3
.sizeCiocb
= (phba
->sli_rev
== 3) ?
10920 SLI3_IOCB_CMD_SIZE
:
10921 SLI2_IOCB_CMD_SIZE
;
10922 pring
->sli
.sli3
.sizeRiocb
= (phba
->sli_rev
== 3) ?
10923 SLI3_IOCB_RSP_SIZE
:
10924 SLI2_IOCB_RSP_SIZE
;
10925 pring
->fast_iotag
= 0;
10926 pring
->iotag_ctr
= 0;
10927 pring
->iotag_max
= 4096;
10928 pring
->lpfc_sli_rcv_async_status
=
10929 lpfc_sli_async_event_handler
;
10930 pring
->num_mask
= LPFC_MAX_RING_MASK
;
10931 pring
->prt
[0].profile
= 0; /* Mask 0 */
10932 pring
->prt
[0].rctl
= FC_RCTL_ELS_REQ
;
10933 pring
->prt
[0].type
= FC_TYPE_ELS
;
10934 pring
->prt
[0].lpfc_sli_rcv_unsol_event
=
10935 lpfc_els_unsol_event
;
10936 pring
->prt
[1].profile
= 0; /* Mask 1 */
10937 pring
->prt
[1].rctl
= FC_RCTL_ELS_REP
;
10938 pring
->prt
[1].type
= FC_TYPE_ELS
;
10939 pring
->prt
[1].lpfc_sli_rcv_unsol_event
=
10940 lpfc_els_unsol_event
;
10941 pring
->prt
[2].profile
= 0; /* Mask 2 */
10942 /* NameServer Inquiry */
10943 pring
->prt
[2].rctl
= FC_RCTL_DD_UNSOL_CTL
;
10945 pring
->prt
[2].type
= FC_TYPE_CT
;
10946 pring
->prt
[2].lpfc_sli_rcv_unsol_event
=
10947 lpfc_ct_unsol_event
;
10948 pring
->prt
[3].profile
= 0; /* Mask 3 */
10949 /* NameServer response */
10950 pring
->prt
[3].rctl
= FC_RCTL_DD_SOL_CTL
;
10952 pring
->prt
[3].type
= FC_TYPE_CT
;
10953 pring
->prt
[3].lpfc_sli_rcv_unsol_event
=
10954 lpfc_ct_unsol_event
;
10957 totiocbsize
+= (pring
->sli
.sli3
.numCiocb
*
10958 pring
->sli
.sli3
.sizeCiocb
) +
10959 (pring
->sli
.sli3
.numRiocb
* pring
->sli
.sli3
.sizeRiocb
);
10961 if (totiocbsize
> MAX_SLIM_IOCB_SIZE
) {
10962 /* Too many cmd / rsp ring entries in SLI2 SLIM */
10963 printk(KERN_ERR
"%d:0462 Too many cmd / rsp ring entries in "
10964 "SLI2 SLIM Data: x%x x%lx\n",
10965 phba
->brd_no
, totiocbsize
,
10966 (unsigned long) MAX_SLIM_IOCB_SIZE
);
10968 if (phba
->cfg_multi_ring_support
== 2)
10969 lpfc_extra_ring_setup(phba
);
10975 * lpfc_sli4_queue_init - Queue initialization function
10976 * @phba: Pointer to HBA context object.
10978 * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
10979 * ring. This function also initializes ring indices of each ring.
10980 * This function is called during the initialization of the SLI
10981 * interface of an HBA.
10982 * This function is called with no lock held and always returns
10986 lpfc_sli4_queue_init(struct lpfc_hba
*phba
)
10988 struct lpfc_sli
*psli
;
10989 struct lpfc_sli_ring
*pring
;
10993 spin_lock_irq(&phba
->hbalock
);
10994 INIT_LIST_HEAD(&psli
->mboxq
);
10995 INIT_LIST_HEAD(&psli
->mboxq_cmpl
);
10996 /* Initialize list headers for txq and txcmplq as double linked lists */
10997 for (i
= 0; i
< phba
->cfg_hdw_queue
; i
++) {
10998 pring
= phba
->sli4_hba
.hdwq
[i
].io_wq
->pring
;
11000 pring
->ringno
= LPFC_FCP_RING
;
11001 pring
->txcmplq_cnt
= 0;
11002 INIT_LIST_HEAD(&pring
->txq
);
11003 INIT_LIST_HEAD(&pring
->txcmplq
);
11004 INIT_LIST_HEAD(&pring
->iocb_continueq
);
11005 spin_lock_init(&pring
->ring_lock
);
11007 pring
= phba
->sli4_hba
.els_wq
->pring
;
11009 pring
->ringno
= LPFC_ELS_RING
;
11010 pring
->txcmplq_cnt
= 0;
11011 INIT_LIST_HEAD(&pring
->txq
);
11012 INIT_LIST_HEAD(&pring
->txcmplq
);
11013 INIT_LIST_HEAD(&pring
->iocb_continueq
);
11014 spin_lock_init(&pring
->ring_lock
);
11016 if (phba
->cfg_enable_fc4_type
& LPFC_ENABLE_NVME
) {
11017 pring
= phba
->sli4_hba
.nvmels_wq
->pring
;
11019 pring
->ringno
= LPFC_ELS_RING
;
11020 pring
->txcmplq_cnt
= 0;
11021 INIT_LIST_HEAD(&pring
->txq
);
11022 INIT_LIST_HEAD(&pring
->txcmplq
);
11023 INIT_LIST_HEAD(&pring
->iocb_continueq
);
11024 spin_lock_init(&pring
->ring_lock
);
11027 spin_unlock_irq(&phba
->hbalock
);
11031 * lpfc_sli_queue_init - Queue initialization function
11032 * @phba: Pointer to HBA context object.
11034 * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11035 * ring. This function also initializes ring indices of each ring.
11036 * This function is called during the initialization of the SLI
11037 * interface of an HBA.
11038 * This function is called with no lock held and always returns
11042 lpfc_sli_queue_init(struct lpfc_hba
*phba
)
11044 struct lpfc_sli
*psli
;
11045 struct lpfc_sli_ring
*pring
;
11049 spin_lock_irq(&phba
->hbalock
);
11050 INIT_LIST_HEAD(&psli
->mboxq
);
11051 INIT_LIST_HEAD(&psli
->mboxq_cmpl
);
11052 /* Initialize list headers for txq and txcmplq as double linked lists */
11053 for (i
= 0; i
< psli
->num_rings
; i
++) {
11054 pring
= &psli
->sli3_ring
[i
];
11056 pring
->sli
.sli3
.next_cmdidx
= 0;
11057 pring
->sli
.sli3
.local_getidx
= 0;
11058 pring
->sli
.sli3
.cmdidx
= 0;
11059 INIT_LIST_HEAD(&pring
->iocb_continueq
);
11060 INIT_LIST_HEAD(&pring
->iocb_continue_saveq
);
11061 INIT_LIST_HEAD(&pring
->postbufq
);
11063 INIT_LIST_HEAD(&pring
->txq
);
11064 INIT_LIST_HEAD(&pring
->txcmplq
);
11065 spin_lock_init(&pring
->ring_lock
);
11067 spin_unlock_irq(&phba
->hbalock
);
11071 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11072 * @phba: Pointer to HBA context object.
11074 * This routine flushes the mailbox command subsystem. It will unconditionally
11075 * flush all the mailbox commands in the three possible stages in the mailbox
11076 * command sub-system: pending mailbox command queue; the outstanding mailbox
11077 * command; and completed mailbox command queue. It is caller's responsibility
11078 * to make sure that the driver is in the proper state to flush the mailbox
11079 * command sub-system. Namely, the posting of mailbox commands into the
11080 * pending mailbox command queue from the various clients must be stopped;
11081 * either the HBA is in a state that it will never works on the outstanding
11082 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11083 * mailbox command has been completed.
11086 lpfc_sli_mbox_sys_flush(struct lpfc_hba
*phba
)
11088 LIST_HEAD(completions
);
11089 struct lpfc_sli
*psli
= &phba
->sli
;
11091 unsigned long iflag
;
11093 /* Disable softirqs, including timers from obtaining phba->hbalock */
11094 local_bh_disable();
11096 /* Flush all the mailbox commands in the mbox system */
11097 spin_lock_irqsave(&phba
->hbalock
, iflag
);
11099 /* The pending mailbox command queue */
11100 list_splice_init(&phba
->sli
.mboxq
, &completions
);
11101 /* The outstanding active mailbox command */
11102 if (psli
->mbox_active
) {
11103 list_add_tail(&psli
->mbox_active
->list
, &completions
);
11104 psli
->mbox_active
= NULL
;
11105 psli
->sli_flag
&= ~LPFC_SLI_MBOX_ACTIVE
;
11107 /* The completed mailbox command queue */
11108 list_splice_init(&phba
->sli
.mboxq_cmpl
, &completions
);
11109 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
11111 /* Enable softirqs again, done with phba->hbalock */
11114 /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11115 while (!list_empty(&completions
)) {
11116 list_remove_head(&completions
, pmb
, LPFC_MBOXQ_t
, list
);
11117 pmb
->u
.mb
.mbxStatus
= MBX_NOT_FINISHED
;
11118 if (pmb
->mbox_cmpl
)
11119 pmb
->mbox_cmpl(phba
, pmb
);
11124 * lpfc_sli_host_down - Vport cleanup function
11125 * @vport: Pointer to virtual port object.
11127 * lpfc_sli_host_down is called to clean up the resources
11128 * associated with a vport before destroying virtual
11129 * port data structures.
11130 * This function does following operations:
11131 * - Free discovery resources associated with this virtual
11133 * - Free iocbs associated with this virtual port in
11135 * - Send abort for all iocb commands associated with this
11136 * vport in txcmplq.
11138 * This function is called with no lock held and always returns 1.
11141 lpfc_sli_host_down(struct lpfc_vport
*vport
)
11143 LIST_HEAD(completions
);
11144 struct lpfc_hba
*phba
= vport
->phba
;
11145 struct lpfc_sli
*psli
= &phba
->sli
;
11146 struct lpfc_queue
*qp
= NULL
;
11147 struct lpfc_sli_ring
*pring
;
11148 struct lpfc_iocbq
*iocb
, *next_iocb
;
11150 unsigned long flags
= 0;
11151 uint16_t prev_pring_flag
;
11153 lpfc_cleanup_discovery_resources(vport
);
11155 spin_lock_irqsave(&phba
->hbalock
, flags
);
11158 * Error everything on the txq since these iocbs
11159 * have not been given to the FW yet.
11160 * Also issue ABTS for everything on the txcmplq
11162 if (phba
->sli_rev
!= LPFC_SLI_REV4
) {
11163 for (i
= 0; i
< psli
->num_rings
; i
++) {
11164 pring
= &psli
->sli3_ring
[i
];
11165 prev_pring_flag
= pring
->flag
;
11166 /* Only slow rings */
11167 if (pring
->ringno
== LPFC_ELS_RING
) {
11168 pring
->flag
|= LPFC_DEFERRED_RING_EVENT
;
11169 /* Set the lpfc data pending flag */
11170 set_bit(LPFC_DATA_READY
, &phba
->data_flags
);
11172 list_for_each_entry_safe(iocb
, next_iocb
,
11173 &pring
->txq
, list
) {
11174 if (iocb
->vport
!= vport
)
11176 list_move_tail(&iocb
->list
, &completions
);
11178 list_for_each_entry_safe(iocb
, next_iocb
,
11179 &pring
->txcmplq
, list
) {
11180 if (iocb
->vport
!= vport
)
11182 lpfc_sli_issue_abort_iotag(phba
, pring
, iocb
,
11185 pring
->flag
= prev_pring_flag
;
11188 list_for_each_entry(qp
, &phba
->sli4_hba
.lpfc_wq_list
, wq_list
) {
11192 if (pring
== phba
->sli4_hba
.els_wq
->pring
) {
11193 pring
->flag
|= LPFC_DEFERRED_RING_EVENT
;
11194 /* Set the lpfc data pending flag */
11195 set_bit(LPFC_DATA_READY
, &phba
->data_flags
);
11197 prev_pring_flag
= pring
->flag
;
11198 spin_lock(&pring
->ring_lock
);
11199 list_for_each_entry_safe(iocb
, next_iocb
,
11200 &pring
->txq
, list
) {
11201 if (iocb
->vport
!= vport
)
11203 list_move_tail(&iocb
->list
, &completions
);
11205 spin_unlock(&pring
->ring_lock
);
11206 list_for_each_entry_safe(iocb
, next_iocb
,
11207 &pring
->txcmplq
, list
) {
11208 if (iocb
->vport
!= vport
)
11210 lpfc_sli_issue_abort_iotag(phba
, pring
, iocb
,
11213 pring
->flag
= prev_pring_flag
;
11216 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
11218 /* Cancel all the IOCBs from the completions list */
11219 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
11225 * lpfc_sli_hba_down - Resource cleanup function for the HBA
11226 * @phba: Pointer to HBA context object.
11228 * This function cleans up all iocb, buffers, mailbox commands
11229 * while shutting down the HBA. This function is called with no
11230 * lock held and always returns 1.
11231 * This function does the following to cleanup driver resources:
11232 * - Free discovery resources for each virtual port
11233 * - Cleanup any pending fabric iocbs
11234 * - Iterate through the iocb txq and free each entry
11236 * - Free up any buffer posted to the HBA
11237 * - Free mailbox commands in the mailbox queue.
11240 lpfc_sli_hba_down(struct lpfc_hba
*phba
)
11242 LIST_HEAD(completions
);
11243 struct lpfc_sli
*psli
= &phba
->sli
;
11244 struct lpfc_queue
*qp
= NULL
;
11245 struct lpfc_sli_ring
*pring
;
11246 struct lpfc_dmabuf
*buf_ptr
;
11247 unsigned long flags
= 0;
11250 /* Shutdown the mailbox command sub-system */
11251 lpfc_sli_mbox_sys_shutdown(phba
, LPFC_MBX_WAIT
);
11253 lpfc_hba_down_prep(phba
);
11255 /* Disable softirqs, including timers from obtaining phba->hbalock */
11256 local_bh_disable();
11258 lpfc_fabric_abort_hba(phba
);
11260 spin_lock_irqsave(&phba
->hbalock
, flags
);
11263 * Error everything on the txq since these iocbs
11264 * have not been given to the FW yet.
11266 if (phba
->sli_rev
!= LPFC_SLI_REV4
) {
11267 for (i
= 0; i
< psli
->num_rings
; i
++) {
11268 pring
= &psli
->sli3_ring
[i
];
11269 /* Only slow rings */
11270 if (pring
->ringno
== LPFC_ELS_RING
) {
11271 pring
->flag
|= LPFC_DEFERRED_RING_EVENT
;
11272 /* Set the lpfc data pending flag */
11273 set_bit(LPFC_DATA_READY
, &phba
->data_flags
);
11275 list_splice_init(&pring
->txq
, &completions
);
11278 list_for_each_entry(qp
, &phba
->sli4_hba
.lpfc_wq_list
, wq_list
) {
11282 spin_lock(&pring
->ring_lock
);
11283 list_splice_init(&pring
->txq
, &completions
);
11284 spin_unlock(&pring
->ring_lock
);
11285 if (pring
== phba
->sli4_hba
.els_wq
->pring
) {
11286 pring
->flag
|= LPFC_DEFERRED_RING_EVENT
;
11287 /* Set the lpfc data pending flag */
11288 set_bit(LPFC_DATA_READY
, &phba
->data_flags
);
11292 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
11294 /* Cancel all the IOCBs from the completions list */
11295 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
11298 spin_lock_irqsave(&phba
->hbalock
, flags
);
11299 list_splice_init(&phba
->elsbuf
, &completions
);
11300 phba
->elsbuf_cnt
= 0;
11301 phba
->elsbuf_prev_cnt
= 0;
11302 spin_unlock_irqrestore(&phba
->hbalock
, flags
);
11304 while (!list_empty(&completions
)) {
11305 list_remove_head(&completions
, buf_ptr
,
11306 struct lpfc_dmabuf
, list
);
11307 lpfc_mbuf_free(phba
, buf_ptr
->virt
, buf_ptr
->phys
);
11311 /* Enable softirqs again, done with phba->hbalock */
11314 /* Return any active mbox cmds */
11315 del_timer_sync(&psli
->mbox_tmo
);
11317 spin_lock_irqsave(&phba
->pport
->work_port_lock
, flags
);
11318 phba
->pport
->work_port_events
&= ~WORKER_MBOX_TMO
;
11319 spin_unlock_irqrestore(&phba
->pport
->work_port_lock
, flags
);

/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (a multiple of the 32-bit word size).
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy (a multiple of the 32-bit word size).
 *
 * This function is used for copying data from a big endian data
 * structure to local endianness.
 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
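
/*
 * Editorial note, illustrative only: both helpers above copy one 32-bit
 * word per iteration, swapping each word's byte order, so @cnt is a byte
 * count.  A hypothetical call copying a mailbox image between driver
 * memory and SLI memory passes the length in bytes (pmb_slim is an
 * invented name for the SLIM-resident mailbox pointer):
 *
 *	lpfc_sli_pcimem_bcopy(pmb_slim, &local_mb, sizeof(MAILBOX_t));
 */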

/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it
	 * up later.
	 */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}

/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffers posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no lock held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
	 * a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}
/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found then lpfc_dmabuf object of the
 * buffer is returned to the caller else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%px x%px x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}
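/*
 * Illustrative sketch (not part of the driver source): a buffer posted with a
 * CMD_QUE_XRI64_CX iocb carries a tag from lpfc_sli_get_buffer_tag(), and the
 * response path recovers the buffer by that tag.  Variable names here are
 * assumptions made for this example only.
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	// Later, on the CMD_IOCB_RET_XRI64_CX completion carrying "tag":
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
 *	if (!mp)
 *		return;		// not found; the lookup already logged it
 */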
/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found else it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%px x%px x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}
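/*
 * Illustrative sketch (not part of the driver source): the unsolicited CT/ELS
 * path posts a DMA buffer and later looks it up by its bus address.  Names
 * are assumptions made for this example only.
 *
 *	lpfc_sli_ringpostbuf_put(phba, pring, mp);
 *	...
 *	// When the unsolicited event arrives with the buffer's DMA address:
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);
 *	if (!mp)
 *		return;		// buffer not found; already logged
 */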
/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb = NULL;

	if (irsp->ulpStatus) {
		/*
		 * Assume that the port already completed and returned, or
		 * will return the iocb. Just Log the message.
		 */
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (irsp->ulpCommand == CMD_ABORT_XRI_CX &&
			    irsp->ulpStatus == IOSTAT_LOCAL_REJECT &&
			    irsp->un.ulpWord[4] == IOERR_ABORT_REQUESTED) {
				spin_unlock_irq(&phba->hbalock);
				goto release_iocb;
			}
			if (abort_iotag != 0 &&
			    abort_iotag <= phba->sli.last_iotag)
				abort_iocb =
					phba->sli.iocbq_lookup[abort_iotag];
		} else
			/* For sli4 the abort_tag is the XRI,
			 * so the abort routine puts the iotag of the iocb
			 * being aborted in the context field of the abort
			 * IOCB.
			 */
			abort_iocb = phba->sli.iocbq_lookup[abort_context];

		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb x%px "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);

		spin_unlock_irq(&phba->hbalock);
	}
release_iocb:
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0139 Ignoring ELS cmd tag x%x completion Data: "
			"x%x x%x x%x\n",
			irsp->ulpIoTag, irsp->ulpStatus,
			irsp->un.ulpWord[4], irsp->ulpTimeout);
	lpfc_nlp_put((struct lpfc_nodelist *)cmdiocb->context1);
	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
		lpfc_ct_free_iocb(phba, cmdiocb);
	else
		lpfc_els_free_iocb(phba, cmdiocb);
}
/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @cmpl: completion function.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function shall be changed for those commands
 * so that nothing happens when they finish. This function is called with
 * hbalock held and no ring_lock held (SLI4). The function returns
 * IOCB_SUCCESS when the command iocb is an abort request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb, void *cmpl)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;
	int retval = IOCB_ERROR;
	unsigned long iflags;
	struct lpfc_nodelist *ndlp;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return IOCB_ABORTING;

	if (!pring) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		return retval;
	}

	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    pring->ringno == LPFC_ELS_RING) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		return retval;
	}

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return IOCB_NORESOURCE;

	/* This signals the response to set the correct status
	 * before calling the completion handler
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	iabt = &abtsiocbp->iocb;
	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.acxri.abortContextTag = icmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
		if (pring->ringno == LPFC_ELS_RING)
			iabt->un.acxri.abortContextTag = cmdiocb->iotag;
	} else {
		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
		if (pring->ringno == LPFC_ELS_RING) {
			ndlp = (struct lpfc_nodelist *)(cmdiocb->context1);
			iabt->un.acxri.abortContextTag = ndlp->nlp_rpi;
		}
	}
	iabt->ulpLe = 1;
	iabt->ulpClass = icmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
	if (cmdiocb->iocb_flag & LPFC_IO_FCP) {
		abtsiocbp->iocb_flag |= LPFC_IO_FCP;
		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
	}
	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
		abtsiocbp->iocb_flag |= LPFC_IO_FOF;

	if (phba->link_state >= LPFC_LINK_UP)
		iabt->ulpCommand = CMD_ABORT_XRI_CN;
	else
		iabt->ulpCommand = CMD_CLOSE_XRI_CN;

	if (cmpl)
		abtsiocbp->iocb_cmpl = cmpl;
	else
		abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
	abtsiocbp->vport = vport;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
		if (unlikely(pring == NULL))
			goto abort_iotag_exit;
		/* Note: both hbalock and ring_lock need to be set here */
		spin_lock_irqsave(&pring->ring_lock, iflags);
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
					       abtsiocbp, 0);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	} else {
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
					       abtsiocbp, 0);
	}

abort_iotag_exit:

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0339 Abort xri x%x, original iotag x%x, "
			 "abort cmd iotag x%x retval x%x\n",
			 iabt->un.acxri.abortIoTag,
			 iabt->un.acxri.abortContextTag,
			 abtsiocbp->iotag, retval);

	if (retval) {
		cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		__lpfc_sli_release_iocbq(phba, abtsiocbp);
	}

	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}
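/*
 * Illustrative sketch (not part of the driver source): callers take the
 * hbalock, request the abort, and pass either their own completion routine or
 * NULL to fall back to lpfc_sli_abort_els_cmpl.  The local names here are
 * assumptions made for this example only.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb,
 *					 lpfc_sli_abort_fcp_cmpl);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (ret != IOCB_SUCCESS && ret != IOCB_ABORTING)
 *		;	// abort not issued; caller decides how to recover
 */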
/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *qp = NULL;
	int i;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}
/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria is met for the given iocb and will return
 * 1 if the filtering criteria is not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by vport, tgt_id and
 * lun_id parameter.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_io_buf *lpfc_cmd;
	int rc = 1;

	if (iocbq->vport != vport)
		return rc;

	if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
	    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ))
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __func__, ctx_cmd);
		break;
	}

	return rc;
}
/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
 * commands pending on the vport associated with SCSI device specified
 * by tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
 * commands pending on the vport associated with SCSI target specified
 * by tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
		  lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int sum, i;

	spin_lock_irq(&phba->hbalock);
	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       ctx_cmd) == 0)
			sum++;
	}
	spin_unlock_irq(&phba->hbalock);

	return sum;
}
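/*
 * Illustrative sketch (not part of the driver source): error handlers
 * typically poll lpfc_sli_sum_iocb() after sending aborts, e.g. while waiting
 * for a LUN to drain.  The "deadline" variable is an assumption made for this
 * example only.
 *
 *	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
 *	       time_before(jiffies, deadline))
 *		msleep(20);
 */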
/**
 * lpfc_sli4_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @wcqe: pointer to the complete wcqe
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb. It is called for sli-4 adapters.
 **/
void
lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			 struct lpfc_wcqe_complete *wcqe)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3017 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"status 0x%x, reason 0x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag,
			(bf_get(lpfc_wcqe_c_status, wcqe)
			 & LPFC_IOCB_STATUS_MASK),
			wcqe->parameter);
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3096 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"status 0x%x, reason 0x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
			rspiocb->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, cmdiocb);
}
/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int errcnt = 0, ret_val = 0;
	unsigned long iflags;
	int i;

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH)
		return errcnt;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		spin_lock_irqsave(&phba->hbalock, iflags);
		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
						     lpfc_sli_abort_fcp_cmpl);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (ret_val != IOCB_SUCCESS)
			errcnt++;
	}

	return errcnt;
}
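/*
 * Illustrative sketch (not part of the driver source): a host-level reset
 * path aborts everything pending on the vport and treats a non-zero return as
 * the count of iocbs that could not be aborted.  The "cnt" name is an
 * assumption made for this example only.
 *
 *	cnt = lpfc_sli_abort_iocb(vport, pring, 0, 0, LPFC_CTX_HOST);
 *	if (cnt)
 *		;	// some aborts were not issued; escalate the recovery
 */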
/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters.
 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
 **/
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4 = NULL;

	spin_lock_irqsave(&phba->hbalock, iflags);

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/* Guard against IO completion being called at same time */
		lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
		spin_lock(&lpfc_cmd->buf_lock);

		if (!lpfc_cmd->pCmd) {
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		if (phba->sli_rev == LPFC_SLI_REV4) {
			pring_s4 =
			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
			if (!pring_s4) {
				spin_unlock(&lpfc_cmd->buf_lock);
				continue;
			}
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock(&pring_s4->ring_lock);
		}

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if ((iocbq->iocb_flag & LPFC_DRIVER_ABORTED) ||
		    !(iocbq->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (!abtsiocbq) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				spin_unlock(&pring_s4->ring_lock);
			spin_unlock(&lpfc_cmd->buf_lock);
			continue;
		}

		icmd = &iocbq->iocb;
		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocbq->iocb.un.acxri.abortIoTag =
							 iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
		abtsiocbq->iocb.ulpLe = 1;
		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
		abtsiocbq->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocbq->iocb_flag |= LPFC_IO_FOF;

		ndlp = lpfc_cmd->rdata->pnode;

		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock(&pring_s4->ring_lock);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}

		spin_unlock(&lpfc_cmd->buf_lock);

		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return sum;
}
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_io_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * A time out has occurred for the iocb. If a time out
		 * completion handler has been supplied, call it. Otherwise,
		 * just free the iocbq.
		 */

		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
					cur_iocbq);
		if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY))
			lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
		else
			lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupt disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	bool iocb_completed = true;

	if (phba->sli_rev >= LPFC_SLI_REV4)
		pring = lpfc_sli4_calc_ring(phba, piocb);
	else
		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or its an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out. Inform the wake iocb wait
			 * completion function and set local status
			 */

			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a success
			 * status or not - that's for the caller to check.
			 * IOCB_SUCCESS means just that the command was sent and
			 * completed. Not that it completed successfully.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}
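/*
 * Illustrative sketch (not part of the driver source): synchronous callers
 * provide a response iocb and a timeout in seconds, and must not free the
 * command iocb on IOCB_TIMEDOUT.  Names are assumptions made for this example
 * only.
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
 *				      timeout);
 *	if (rc == IOCB_SUCCESS)
 *		status = rspiocbq->iocb.ulpStatus;
 *	else if (rc == IOCB_TIMEDOUT)
 *		return;	// iocb is released by the wake/timeout handler
 */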
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. Caller
 * should not free the mailbox resources, if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupt disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	struct completion mbox_done;
	int retval;
	unsigned long flag;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	/* setup context3 field to pass wait_queue pointer to wake function */
	init_completion(&mbox_done);
	pmboxq->context3 = &mbox_done;
	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_for_completion_timeout(&mbox_done,
					    msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		pmboxq->context3 = NULL;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	}
	return retval;
}
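/*
 * Illustrative sketch (not part of the driver source): management paths
 * allocate a mailbox, issue it synchronously, and only free it when the call
 * did not time out (a timed-out mailbox is cleaned up by the default
 * completion).  The timeout value shown is an assumption for the example.
 *
 *	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	...
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 */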
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: Mailbox shutdown options.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as being in a block state to prevent
 * the asynchronous mailbox command from being issued off the pending mailbox
 * command queue. If the mailbox command sub-system shutdown is due to
 * HBA error conditions such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to normal condition (such
 * as with offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	/* Disable softirqs, including timers from obtaining phba->hbalock */
	local_bh_disable();

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine to
				 * forcefully release active mailbox command
				 */
				break;
		}
	} else {
		spin_unlock_irq(&phba->hbalock);

		/* Enable softirqs again, done with phba->hbalock */
		local_bh_enable();
	}

	lpfc_sli_mbox_sys_flush(phba);
}
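/*
 * Illustrative sketch (not part of the driver source): offline paths shut the
 * mailbox sub-system down gracefully, while error-recovery paths skip the
 * wait.  LPFC_MBX_WAIT and LPFC_MBX_NO_WAIT are the two mbx_action options.
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);	// graceful
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);	// EEH/ERATT
 */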
/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if there is a deferred error condition active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}
/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_6:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}
/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}
/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state that
 * interrupt should be handled, otherwise -EIO.
 **/
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}
/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				  (HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if there is a deferred error condition
			 * active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							      MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
					    pmb->ctx_buf)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->ctx_buf,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							pmb->ctx_buf;
						ndlp = (struct lpfc_nodelist *)
							pmb->ctx_ndlp;

						/* Reg_LOGIN of dflt RPI was
						 * successful. new lets get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->ctx_buf = mp;
						pmb->ctx_ndlp = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_TRACE_EVENT,
							"0350 rc should have"
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */
/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;
	struct lpfc_sli_ring *pring;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba, pring, status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */
/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invokes slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Now, handle all the els xri abort events */
	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
				       iflags);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);

		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
				  iflags);
	}
	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
}
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize /
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
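/*
 * lpfc_sli4_iocb_param_transfer() above is the bridge between SLI-4 WCQEs and
 * the legacy IOCB completion path: FCP underruns are reported by subtracting
 * total_data_placed from the command's fcpi_parm, GEN/ELS response lengths
 * are clamped to the size described by the BPL, and DIF/BlockGuard errors are
 * folded into IOSTAT_LOCAL_REJECT with the guard/app/ref tag bits in bgstat.
 */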
/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return NULL;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	spin_lock_irqsave(&pring->ring_lock, iflags);
	/* Put the iocb back on the txcmplq */
	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}
inline struct lpfc_cq_event *
lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
{
	struct lpfc_cq_event *cq_event;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0602 Failed to alloc CQ_EVENT entry\n");
		return NULL;
	}

	/* Move the CQE into the event */
	memcpy(&cq_event->cqe, entry, size);
	return cq_event;
}
/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine process a mailbox completion queue entry with asynchronous
 * event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
	if (!cq_event)
		return false;

	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);

	/* Set the async event flag */
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine process a mailbox completion queue entry with mailbox
 * completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
			ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
			/* Reg_LOGIN of dflt RPI was successful. Now lets get
			 * RID of the PPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->ctx_buf = mp;

			/* No reference taken here. This is a default
			 * RPI reg/immediate unreg cycle. The reference was
			 * taken in the reg rpi path and is released when
			 * this mailbox completes.
			 */
			pmb->ctx_ndlp = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR,
						LOG_TRACE_EVENT,
						"0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
	return workposted;

out_no_mqe_complete:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return false;
}
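/*
 * Once a mailbox completion has been handled above, the posting token
 * (LPFC_SLI_MBOX_ACTIVE) is released and mbox_active is cleared under
 * hbalock before the worker thread is woken, so the next pending mailbox
 * command can be posted without racing the completion just processed.
 */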
/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine process a mailbox completion queue entry, it invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	cq->CQ_mbox++;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* Log the error status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0357 ELS CQE error: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
				"els_txcmplq_cnt=%d\n",
				txq_cnt, phba->iocb_cnt,
				txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
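/*
 * ELS completions are deliberately not processed in interrupt context: the
 * WCQE is copied into an irspiocbq and queued on sp_queue_event so that the
 * worker thread can run the discovery state machine against it.
 */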
/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles slow-path WQ entry consumed event by invoking the
 * proper WQ release routine to the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* sanity check on queue memory */
	if (unlikely(!phba->sli4_hba.els_wq))
		return;
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	switch (cq->subtype) {
	case LPFC_IO:
		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			/* Notify aborted XRI for NVME work queue */
			if (phba->nvmet_support)
				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
		}
		workposted = false;
		break;
	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
	case LPFC_ELS:
		cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
		if (!cq_event) {
			workposted = false;
			break;
		}
		cq_event->hdwq = cq->hdwq;
		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
				  iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
				       iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0603 Invalid CQ subtype %d: "
				"%08x %08x %08x %08x\n",
				cq->subtype, wcqe->word0, wcqe->parameter,
				wcqe->word2, wcqe->word3);
		workposted = false;
		break;
	}
	return workposted;
}
#define FC_RCTL_MDS_DIAGS	0xF4
/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine process a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct fc_frame_header *fc_hdr;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct lpfc_nvmet_tgtport *tgtp;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2537 Receive Frame Truncated!!\n");
		fallthrough;
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));

		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			/* Handle MDS Loopback frames */
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_sli4_handle_mds_loopback(phba->pport,
							      dma_buf);
			else
				lpfc_in_buf_free(phba, &dma_buf->dbuf);
			break;
		}

		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6402 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		fallthrough;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}
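/*
 * Unsolicited receive completions handled above are normally deferred to the
 * worker thread via sp_queue_event; MDS diagnostic/loopback frames are the
 * exception and are either handed to the MDS loopback handler or freed
 * immediately.
 */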
/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine process a slow-path work-queue or receive queue completion
 * queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine process a event queue entry from the slow-path event queue.
 * It will check the MajorCode and MinorCode to determine this is for a
 * completion event on a completion queue, if not, an error shall be logged
 * and just return. Otherwise, it will get to the corresponding completion
 * queue and process all the entries on that completion queue, rearm the
 * completion queue, and then return.
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	uint16_t cqid;
	int ret = 0;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Save EQ associated with this CQ */
	cq->assoc_qp = speq;

	if (is_kdump_kernel())
		ret = queue_work(phba->wq, &cq->spwork);
	else
		ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);

	if (!ret)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0390 Cannot schedule queue work "
				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
				cqid, cq->queue_id, raw_smp_processor_id());
}
/**
 * __lpfc_sli4_process_cq - Process elements of a CQ
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ to be processed
 * @handler: Routine to process each cqe
 * @delay: Pointer to usdelay to set in case of rescheduling of the handler
 * @poll_mode: Polling mode we were called from
 *
 * This routine processes completion queue entries in a CQ. While a valid
 * queue element is found, the handler is called. During processing checks
 * are made for periodic doorbell writes to let the hardware know of
 * element consumption.
 *
 * If the max limit on cqes to process is hit, or there are no more valid
 * entries, the loop stops. If we processed a sufficient number of elements,
 * meaning there is sufficient load, rather than rearming and generating
 * another interrupt, a cq rescheduling delay will be set. A delay of 0
 * indicates no rescheduling.
 *
 * Returns True if work scheduled, False otherwise.
 **/
static bool
__lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
		       bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
				       struct lpfc_cqe *), unsigned long *delay,
		       enum lpfc_poll_mode poll_mode)
{
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int count = 0, consumed = 0;
	bool arm = true;

	/* default - no reschedule */
	*delay = 0;

	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	/* Process all the entries to the CQ */
	cq->q_flag = 0;
	cqe = lpfc_sli4_cq_get(cq);
	while (cqe) {
		workposted |= handler(phba, cq, cqe);
		__lpfc_sli4_consume_cqe(phba, cq, cqe);

		consumed++;
		if (!(++count % cq->max_proc_limit))
			break;

		if (!(count % cq->notify_interval)) {
			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
			cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
		}

		if (count == LPFC_NVMET_CQ_NOTIFY)
			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;

		cqe = lpfc_sli4_cq_get(cq);
	}
	if (count >= phba->cfg_cq_poll_threshold) {
		*delay = 1;
		arm = false;
	}

	/* Note: complete the irq_poll softirq before rearming CQ */
	if (poll_mode == LPFC_IRQ_POLL)
		irq_poll_complete(&cq->iop);

	/* Track the max number of CQEs processed in 1 EQ */
	if (count > cq->CQ_max_cqe)
		cq->CQ_max_cqe = count;

	cq->assoc_qp->EQ_cqe_cnt += count;

	/* Catch the no cq entry condition */
	if (unlikely(count == 0))
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0369 No entry from completion queue "
				"qid=%d\n", cq->queue_id);

	xchg(&cq->queue_claimed, 0);

rearm_and_exit:
	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
			arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);

	return workposted;
}
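/*
 * __lpfc_sli4_process_cq() batches doorbell writes: every notify_interval
 * entries the consumed count is acknowledged without rearming, and the final
 * doorbell write rearms the CQ only when no reschedule delay was requested
 * by the poll-threshold check.
 */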
/**
 * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
 * @cq: pointer to CQ to process
 *
 * This routine calls the cq processing routine with a handler specific
 * to the type of queue bound to it.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wakeup the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and yet another interrupt, the CQ handler should be queued so
 * that it is processed in a subsequent polling action. The value of
 * the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;
	int ret = 0;

	/* Process and rearm the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_mcqe,
						&delay, LPFC_QUEUE_WORK);
		break;
	case LPFC_WCQ:
		if (cq->subtype == LPFC_IO)
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_fp_handle_cqe,
						&delay, LPFC_QUEUE_WORK);
		else
			workposted |= __lpfc_sli4_process_cq(phba, cq,
						lpfc_sli4_sp_handle_cqe,
						&delay, LPFC_QUEUE_WORK);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	if (delay) {
		if (is_kdump_kernel())
			ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
						 delay);
		else
			ret = queue_delayed_work_on(cq->chann, phba->wq,
						    &cq->sched_spwork, delay);
		if (!ret)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0394 Cannot schedule queue work "
					"for cqid=%d on CPU %d\n",
					cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_sp_process_cq - slow-path work handler when started by
 * interrupt
 * @work: pointer to work element
 *
 * translates from the work handler and calls the slow-path handler.
 **/
static void
lpfc_sli4_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);

	__lpfc_sli4_sp_process_cq(cq);
}

/**
 * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
 * @work: pointer to work element
 *
 * translates from the work handler and calls the slow-path handler.
 **/
static void
lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					     struct lpfc_queue, sched_spwork);

	__lpfc_sli4_sp_process_cq(cq);
}
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine process a fast-path work queue completion entry from fast-path
 * event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the cmpl status */
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0373 FCP CQE cmpl: status=x%x: "
				"CQE: %08x %08x %08x %08x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				wcqe->word0, wcqe->total_data_placed,
				wcqe->parameter, wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	cmdiocbq->isr_timestamp = cq->isr_timestamp;
#endif
	if (cmdiocbq->iocb_cmpl == NULL) {
		if (cmdiocbq->wqe_cmpl) {
			/* For FCP the flag is cleared in wqe_cmpl */
			if (!(cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
			    cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
				spin_lock_irqsave(&phba->hbalock, iflags);
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
				spin_unlock_irqrestore(&phba->hbalock, iflags);
			}

			/* Pass the cmd_iocb and the wcqe to the upper layer */
			(cmdiocbq->wqe_cmpl)(phba, cmdiocbq, wcqe);
			return;
		}
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Only SLI4 non-IO commands still use IOCB */
	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine to the slow-path WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t hba_wqid;

	/* Check for fast-path FCP work queue release */
	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == hba_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			if (childwq->q_flag & HBA_NVMET_WQFULL)
				lpfc_nvmet_wqfull_process(phba, childwq);
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
}
/**
 * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine process a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			    struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq;
	struct lpfc_queue *drq;
	struct rqb_dmabuf *dma_buf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, rq_id;
	unsigned long iflags;
	uint32_t fctl, idx;

	if ((phba->nvmet_support == 0) ||
	    (phba->sli4_hba.nvmet_cqset == NULL))
		return workposted;

	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
	drq = phba->sli4_hba.nvmet_mrq_data[idx];

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);

	if ((phba->nvmet_support == 0) ||
	    (rq_id != hrq->queue_id))
		return workposted;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6126 Receive Frame Truncated!!\n");
		fallthrough;
	case FC_STATUS_RQ_SUCCESS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		lpfc_sli4_rq_release(hrq, drq);
		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		hrq->RQ_rcv_buf++;
		hrq->RQ_buf_posted--;
		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;

		/* Just some basic sanity checks on FCP Command frame */
		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
			fc_hdr->fh_f_ctl[1] << 8 |
			fc_hdr->fh_f_ctl[2]);
		if (((fctl &
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
			goto drop;

		if (fc_hdr->fh_type == FC_TYPE_FCP) {
			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
			lpfc_nvmet_unsol_fcp_event(
				phba, idx, dma_buf, cq->isr_timestamp,
				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
			return false;
		}
drop:
		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
		break;
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		if (phba->nvmet_support) {
			tgtp = phba->targetport->private;
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6401 RQE Error x%x, posted %d err_cnt "
					"%d: %x %x %x\n",
					status, hrq->RQ_buf_posted,
					hrq->RQ_no_posted_buf,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));
		}
		fallthrough;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		break;
	}
out:
	return workposted;
}
/**
 * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
 * @phba: adapter with cq
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine process a fast-path work queue completion entry from fast-path
 * event queue for FCP command response completion.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
	case CQE_CODE_NVME_ERSP:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	case CQE_CODE_RECEIVE_V1:
	case CQE_CODE_RECEIVE:
		phba->last_completion_time = jiffies;
		if (cq->subtype == LPFC_NVMET) {
			workposted = lpfc_sli4_nvmet_handle_rcqe(
				phba, cq, (struct lpfc_rcqe *)&wcqe);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0144 Not a valid CQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_sched_cq_work - Schedules cq work
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to CQ
 * @cqid: CQ ID
 *
 * This routine checks the poll mode of the CQ corresponding to
 * cq->chann, then either schedules a softirq or queue_work to complete
 * cq work.
 *
 * queue_work path is taken if in NVMET mode, or if poll_mode is in
 * LPFC_QUEUE_WORK mode. Otherwise, softirq path is taken.
 **/
static void lpfc_sli4_sched_cq_work(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, uint16_t cqid)
{
	int ret = 0;

	switch (cq->poll_mode) {
	case LPFC_IRQ_POLL:
		irq_poll_sched(&cq->iop);
		break;
	case LPFC_QUEUE_WORK:
	default:
		if (is_kdump_kernel())
			ret = queue_work(phba->wq, &cq->irqwork);
		else
			ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
		if (!ret)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0383 Cannot schedule queue work "
					"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
					cqid, cq->queue_id,
					raw_smp_processor_id());
		break;
	}
}
/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eq: Pointer to the queue structure.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine process a event queue entry from the fast-path event queue.
 * It will check the MajorCode and MinorCode to determine this is for a
 * completion event on a completion queue, if not, an error shall be logged
 * and just return. Otherwise, it will get to the corresponding completion
 * queue and process all the entries on the completion queue, rearm the
 * completion queue, and then return.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			 struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL;
	uint32_t qidx = eq->hdwq;
	uint16_t cqid, id;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Use the fast lookup method first */
	if (cqid <= phba->sli4_hba.cq_max) {
		cq = phba->sli4_hba.cq_lookup[cqid];
		if (cq)
			goto work_cq;
	}

	/* Next check for NVMET completion */
	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
			/* Process NVMET unsol rcv */
			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
			goto process_cq;
		}
	}

	if (phba->sli4_hba.nvmels_cq &&
	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
		/* Process NVME unsol rcv */
		cq = phba->sli4_hba.nvmels_cq;
	}

	/* Otherwise this is a Slow path event */
	if (cq == NULL) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
					phba->sli4_hba.hdwq[qidx].hba_eq);
		return;
	}

process_cq:
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

work_cq:
#if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
	if (phba->ktime_on)
		cq->isr_timestamp = ktime_get_ns();
	else
		cq->isr_timestamp = 0;
#endif
	lpfc_sli4_sched_cq_work(phba, cq, cqid);
}
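/*
 * EQE dispatch above resolves the CQID through the cq_lookup[] table first;
 * only the NVMET CQ set and the NVME LS CQ need the slower checks, and any
 * CQID that still cannot be matched is handed to the slow-path EQE handler.
 */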
/**
 * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
 * @cq: Pointer to CQ to be processed
 * @poll_mode: Enum lpfc_poll_state to determine poll mode
 *
 * This routine calls the cq processing routine with the handler for
 * fast path CQEs.
 *
 * The CQ routine returns two values: the first is the calling status,
 * which indicates whether work was queued to the background discovery
 * thread. If true, the routine should wakeup the discovery thread;
 * the second is the delay parameter. If non-zero, rather than rearming
 * the CQ and yet another interrupt, the CQ handler should be queued so
 * that it is processed in a subsequent polling action. The value of
 * the delay indicates when to reschedule it.
 **/
static void
__lpfc_sli4_hba_process_cq(struct lpfc_queue *cq,
			   enum lpfc_poll_mode poll_mode)
{
	struct lpfc_hba *phba = cq->phba;
	unsigned long delay;
	bool workposted = false;
	int ret = 0;

	/* process and rearm the CQ */
	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
					     &delay, poll_mode);

	if (delay) {
		if (is_kdump_kernel())
			ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
						 delay);
		else
			ret = queue_delayed_work_on(cq->chann, phba->wq,
						    &cq->sched_irqwork, delay);
		if (!ret)
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"0367 Cannot schedule queue work "
					"for cqid=%d on CPU %d\n",
					cq->queue_id, cq->chann);
	}

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_hba_process_cq - fast-path work handler when started by
 * interrupt
 * @work: pointer to work element
 *
 * translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);

	__lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}

/**
 * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
 * @work: pointer to work element
 *
 * translates from the work handler and calls the fast-path handler.
 **/
static void
lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
{
	struct lpfc_queue *cq = container_of(to_delayed_work(work),
					     struct lpfc_queue, sched_irqwork);

	__lpfc_sli4_hba_process_cq(cq, LPFC_QUEUE_WORK);
}
/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that,
 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
 * equal to that of FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_hba_eq_hdl *hba_eq_hdl;
	struct lpfc_queue *fpeq;
	unsigned long iflag;
	int ecount = 0;
	int hba_eqidx;
	struct lpfc_eq_intr_info *eqi;

	/* Get the driver's phba structure from the dev_id */
	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
	phba = hba_eq_hdl->phba;
	hba_eqidx = hba_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hdwq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
	if (unlikely(!fpeq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eqcq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
	eqi->icnt++;

	fpeq->last_cpu = raw_smp_processor_id();

	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
	    phba->cfg_auto_imax &&
	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);

	/* process and rearm the EQ */
	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_hba_intr_handler */
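/*
 * Interrupt coalescing note: when the per-CPU EQ interrupt count exceeds
 * LPFC_EQD_ISR_TRIGGER and the port supports EQ delay registers, the handler
 * above raises the EQ delay to LPFC_MAX_AUTO_EQ_DELAY to throttle the
 * interrupt rate under load.
 */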
/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int qidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.hba_eq_hdl[qidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
	struct lpfc_queue *eq;
	int i = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
		i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
	if (!list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	rcu_read_unlock();
}
inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
{
	struct lpfc_hba *phba = eq->phba;
	int i = 0;

	/*
	 * Unlocking an irq is one of the entry points to check
	 * for re-schedule, but we are good for io submission
	 * path as midlayer does a get_cpu to glue us in. Flush
	 * out the invalidate queue so we can see the updated
	 * value for flag.
	 */
	smp_rmb();

	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
		/* We will not likely get the completion for the caller
		 * during this iteration but i guess that's fine.
		 * Future io's coming on this eq should be able to
		 * pick it up.  As for the case of single io's, they
		 * will be handled through a sched from polling timer
		 * function which is currently triggered every 1msec.
		 */
		i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);

	return i;
}
static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* kickstart slowpath processing if needed */
	if (list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));

	list_add_rcu(&eq->_poll_list, &phba->poll_list);
	synchronize_rcu();
}
static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	/* Disable slowpath processing for this eq.  Kick start the eq
	 * by RE-ARMING the eq's ASAP
	 */
	list_del_rcu(&eq->_poll_list);
	synchronize_rcu();

	if (list_empty(&phba->poll_list))
		del_timer_sync(&phba->cpuhp_poll_timer);
}
void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq, *next;

	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
		list_del(&eq->_poll_list);

	INIT_LIST_HEAD(&phba->poll_list);
	synchronize_rcu();
}
static inline void
__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
{
	if (mode == eq->mode)
		return;
	/*
	 * currently this function is only called during a hotplug
	 * event and the cpu on which this function is executing
	 * is going offline.  By now the hotplug has instructed
	 * the scheduler to remove this cpu from cpu active mask.
	 * So we don't need to worry about being put aside by the
	 * scheduler for a high priority process.  Yes, the
	 * interrupts could come but they are known to retire ASAP.
	 */

	/* Disable polling in the fastpath */
	WRITE_ONCE(eq->mode, mode);
	/* flush out the store buffer */
	smp_wmb();

	/*
	 * Add this eq to the polling list and start polling. For
	 * a grace period both interrupt handler and poller will
	 * try to process the eq _but_ that's fine.  We have a
	 * synchronization mechanism in place (queue_claimed) to
	 * deal with it.  This is just a draining phase for the
	 * interrupt handler (not eq's) as we have guaranteed
	 * through the barrier that all the CPUs have seen the new
	 * CQ_POLLED state, which will effectively disable the
	 * REARMING of the EQ.  The whole idea is eq's die off
	 * eventually as we are not rearming EQ's anymore.
	 */
	mode ? lpfc_sli4_add_to_poll_list(eq) :
	       lpfc_sli4_remove_from_poll_list(eq);
}

void lpfc_sli4_start_polling(struct lpfc_queue *eq)
{
	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
}
void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
{
	struct lpfc_hba *phba = eq->phba;

	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);

	/* Kick start for the pending io's in h/w.
	 * Once we switch back to interrupt processing on an eq
	 * the io path completion will only arm eq's when it
	 * receives a completion.  But since eq's are in a
	 * disarmed state it doesn't receive a completion.  This
	 * creates a deadlock scenario.
	 */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
}
/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	if (!list_empty(&queue->wq_list))
		list_del(&queue->wq_list);

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	if (queue->rqbp) {
		lpfc_free_rq_buffer(queue->phba, queue);
		kfree(queue->rqbp);
	}

	if (!list_empty(&queue->cpu_list))
		list_del(&queue->cpu_list);

	kfree(queue);
}
/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @page_size: The size of a queue page
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 * @cpu: The cpu that will primarily utilize this queue.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
		      uint32_t entry_size, uint32_t entry_count, int cpu)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	uint16_t x, pgcnt;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = page_size;

	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;

	/* If needed, adjust page count to match the max the adapter supports */
	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;

	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
			     GFP_KERNEL, cpu_to_node(cpu));
	if (!queue)
		return NULL;

	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->_poll_list);
	INIT_LIST_HEAD(&queue->wq_list);
	INIT_LIST_HEAD(&queue->wqfull_list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	INIT_LIST_HEAD(&queue->cpu_list);

	/* Set queue parameters now.  If the system cannot provide memory
	 * resources, the free routine needs to know what was allocated.
	 */
	queue->page_count = pgcnt;
	queue->q_pgs = (void **)&queue[1];
	queue->entry_cnt_per_pg = hw_page_size / entry_size;
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->page_size = hw_page_size;
	queue->phba = phba;

	for (x = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
				      dev_to_node(&phba->pcidev->dev));
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* use lpfc_sli4_qe to index a particular entry in this page */
		queue->q_pgs[x] = dmabuf->virt;
	}
	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);

	/* notify_interval will be set during q creation */

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
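
/*
 * Illustrative sketch (not part of the driver): the expected pairing of
 * lpfc_sli4_queue_alloc() and lpfc_sli4_queue_free().  The 16-byte entry
 * size and 1024-entry count are illustrative values only (they work out to
 * four pages of DMA memory); the real sizes come from the port parameters.
 */
static void __maybe_unused
lpfc_example_alloc_host_queue(struct lpfc_hba *phba, int cpu)
{
	struct lpfc_queue *q;

	q = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, 16, 1024, cpu);
	if (!q)
		return;		/* allocation failed; nothing to undo */

	/* ... create the queue on the HBA, use it, then destroy it ... */

	/* free only after the queue has been destroyed on the HBA */
	lpfc_sli4_queue_free(q);
}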
/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 */
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}
/**
 * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
 * @phba: HBA structure that EQs are on.
 * @startq: The starting EQ index to modify
 * @numq: The number of EQs (consecutive indexes) to modify
 * @usdelay: amount of delay
 *
 * This function revises the EQ delay on 1 or more EQs. The EQ delay
 * is set either by writing to a register (if supported by the SLI Port)
 * or by mailbox command. The mailbox command allows several EQs to be
 * updated at once.
 *
 * The @phba struct is used to send a mailbox command to the HBA. The @startq
 * index selects the first EQ to change and @numq says how many consecutive
 * EQ indexes, starting at @startq, are to be changed. The mailbox command is
 * issued in polled mode, so this function waits for it to finish before
 * returning. Failures (including failure to allocate the mailbox buffer) are
 * logged; note that on a failure some EQs may already have had their delay
 * multiplier changed.
 **/
void
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
			 uint32_t numq, uint32_t usdelay)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt = 0, rc, length;
	uint32_t shdr_status, shdr_add_status;
	uint32_t dmult;
	int qidx;
	union lpfc_sli4_cfg_shdr *shdr;

	if (startq >= phba->cfg_irq_chann)
		return;

	if (usdelay > 0xFFFF) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
				"6429 usdelay %d too large. Scaled down to "
				"0xFFFF.\n", usdelay);
		usdelay = 0xFFFF;
	}

	/* set values by EQ_DELAY register if supported */
	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
			if (!eq)
				continue;

			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);

			if (++cnt >= numq)
				break;
		}
		return;
	}

	/* Otherwise, set values by mailbox cmd */

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6428 Failed allocating mailbox cmd buffer."
				" EQ delay was not set.\n");
		return;
	}
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
	if (dmult)
		dmult--;
	if (dmult > LPFC_DMULT_MAX)
		dmult = LPFC_DMULT_MAX;

	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
		if (!eq)
			continue;
		eq->q_mode = usdelay;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;

		if (++cnt >= numq)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->ctx_buf = NULL;
	mbox->ctx_ndlp = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
	}
	mempool_free(mbox, phba->mbox_mem_pool);
}
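
/*
 * Illustrative sketch (not part of the driver): a caller revising the
 * interrupt coalescing delay on every configured interrupt channel at
 * once.  The 16us value is illustrative only; the driver derives its own
 * delay from the configured interrupts-per-second target.
 */
static void __maybe_unused
lpfc_example_set_eq_delay(struct lpfc_hba *phba)
{
	/* start at EQ 0 and cover every configured IRQ channel */
	lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 16);
}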
/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. The mailbox command is issued in polled mode, so this
 * function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
15282 lpfc_eq_create(struct lpfc_hba
*phba
, struct lpfc_queue
*eq
, uint32_t imax
)
15284 struct lpfc_mbx_eq_create
*eq_create
;
15285 LPFC_MBOXQ_t
*mbox
;
15286 int rc
, length
, status
= 0;
15287 struct lpfc_dmabuf
*dmabuf
;
15288 uint32_t shdr_status
, shdr_add_status
;
15289 union lpfc_sli4_cfg_shdr
*shdr
;
15291 uint32_t hw_page_size
= phba
->sli4_hba
.pc_sli4_params
.if_page_sz
;
15293 /* sanity check on queue memory */
15296 if (!phba
->sli4_hba
.pc_sli4_params
.supported
)
15297 hw_page_size
= SLI4_PAGE_SIZE
;
15299 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
15302 length
= (sizeof(struct lpfc_mbx_eq_create
) -
15303 sizeof(struct lpfc_sli4_cfg_mhdr
));
15304 lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_COMMON
,
15305 LPFC_MBOX_OPCODE_EQ_CREATE
,
15306 length
, LPFC_SLI4_MBX_EMBED
);
15307 eq_create
= &mbox
->u
.mqe
.un
.eq_create
;
15308 shdr
= (union lpfc_sli4_cfg_shdr
*) &eq_create
->header
.cfg_shdr
;
15309 bf_set(lpfc_mbx_eq_create_num_pages
, &eq_create
->u
.request
,
15311 bf_set(lpfc_eq_context_size
, &eq_create
->u
.request
.context
,
15313 bf_set(lpfc_eq_context_valid
, &eq_create
->u
.request
.context
, 1);
15315 /* Use version 2 of CREATE_EQ if eqav is set */
15316 if (phba
->sli4_hba
.pc_sli4_params
.eqav
) {
15317 bf_set(lpfc_mbox_hdr_version
, &shdr
->request
,
15318 LPFC_Q_CREATE_VERSION_2
);
15319 bf_set(lpfc_eq_context_autovalid
, &eq_create
->u
.request
.context
,
15320 phba
->sli4_hba
.pc_sli4_params
.eqav
);
15323 /* don't setup delay multiplier using EQ_CREATE */
15325 bf_set(lpfc_eq_context_delay_multi
, &eq_create
->u
.request
.context
,
15327 switch (eq
->entry_count
) {
15329 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
15330 "0360 Unsupported EQ count. (%d)\n",
15332 if (eq
->entry_count
< 256) {
15336 fallthrough
; /* otherwise default to smallest count */
15338 bf_set(lpfc_eq_context_count
, &eq_create
->u
.request
.context
,
15342 bf_set(lpfc_eq_context_count
, &eq_create
->u
.request
.context
,
15346 bf_set(lpfc_eq_context_count
, &eq_create
->u
.request
.context
,
15350 bf_set(lpfc_eq_context_count
, &eq_create
->u
.request
.context
,
15354 bf_set(lpfc_eq_context_count
, &eq_create
->u
.request
.context
,
15358 list_for_each_entry(dmabuf
, &eq
->page_list
, list
) {
15359 memset(dmabuf
->virt
, 0, hw_page_size
);
15360 eq_create
->u
.request
.page
[dmabuf
->buffer_tag
].addr_lo
=
15361 putPaddrLow(dmabuf
->phys
);
15362 eq_create
->u
.request
.page
[dmabuf
->buffer_tag
].addr_hi
=
15363 putPaddrHigh(dmabuf
->phys
);
15365 mbox
->vport
= phba
->pport
;
15366 mbox
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
15367 mbox
->ctx_buf
= NULL
;
15368 mbox
->ctx_ndlp
= NULL
;
15369 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
15370 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
15371 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
15372 if (shdr_status
|| shdr_add_status
|| rc
) {
15373 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
15374 "2500 EQ_CREATE mailbox failed with "
15375 "status x%x add_status x%x, mbx status x%x\n",
15376 shdr_status
, shdr_add_status
, rc
);
15379 eq
->type
= LPFC_EQ
;
15380 eq
->subtype
= LPFC_NONE
;
15381 eq
->queue_id
= bf_get(lpfc_mbx_eq_create_q_id
, &eq_create
->u
.response
);
15382 if (eq
->queue_id
== 0xFFFF)
15384 eq
->host_index
= 0;
15385 eq
->notify_interval
= LPFC_EQ_NOTIFY_INTRVL
;
15386 eq
->max_proc_limit
= LPFC_EQ_MAX_PROC_LIMIT
;
15388 mempool_free(mbox
, phba
->mbox_mem_pool
);
static int lpfc_cq_poll_hdler(struct irq_poll *iop, int budget)
{
	struct lpfc_queue *cq = container_of(iop, struct lpfc_queue, iop);

	__lpfc_sli4_hba_process_cq(cq, LPFC_IRQ_POLL);

	return 1;
}
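
/*
 * Illustrative sketch (not part of the driver): scheduling the irq_poll
 * instance that lpfc_cq_create() below initializes with lpfc_cq_poll_hdler().
 * Whether a given completion is deferred to irq_poll is a driver policy
 * decision; this only shows the mechanism.
 */
static void __maybe_unused
lpfc_example_defer_cq_to_irqpoll(struct lpfc_queue *cq)
{
	/* hand further CQE processing to the softirq-based poller */
	irq_poll_sched(&cq->iop);
}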
/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to. This
 * function will send the CQ_CREATE mailbox command to the HBA to setup the
 * completion queue. The mailbox command is issued in polled mode, so this
 * function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
15425 lpfc_cq_create(struct lpfc_hba
*phba
, struct lpfc_queue
*cq
,
15426 struct lpfc_queue
*eq
, uint32_t type
, uint32_t subtype
)
15428 struct lpfc_mbx_cq_create
*cq_create
;
15429 struct lpfc_dmabuf
*dmabuf
;
15430 LPFC_MBOXQ_t
*mbox
;
15431 int rc
, length
, status
= 0;
15432 uint32_t shdr_status
, shdr_add_status
;
15433 union lpfc_sli4_cfg_shdr
*shdr
;
15435 /* sanity check on queue memory */
15439 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
15442 length
= (sizeof(struct lpfc_mbx_cq_create
) -
15443 sizeof(struct lpfc_sli4_cfg_mhdr
));
15444 lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_COMMON
,
15445 LPFC_MBOX_OPCODE_CQ_CREATE
,
15446 length
, LPFC_SLI4_MBX_EMBED
);
15447 cq_create
= &mbox
->u
.mqe
.un
.cq_create
;
15448 shdr
= (union lpfc_sli4_cfg_shdr
*) &cq_create
->header
.cfg_shdr
;
15449 bf_set(lpfc_mbx_cq_create_num_pages
, &cq_create
->u
.request
,
15451 bf_set(lpfc_cq_context_event
, &cq_create
->u
.request
.context
, 1);
15452 bf_set(lpfc_cq_context_valid
, &cq_create
->u
.request
.context
, 1);
15453 bf_set(lpfc_mbox_hdr_version
, &shdr
->request
,
15454 phba
->sli4_hba
.pc_sli4_params
.cqv
);
15455 if (phba
->sli4_hba
.pc_sli4_params
.cqv
== LPFC_Q_CREATE_VERSION_2
) {
15456 bf_set(lpfc_mbx_cq_create_page_size
, &cq_create
->u
.request
,
15457 (cq
->page_size
/ SLI4_PAGE_SIZE
));
15458 bf_set(lpfc_cq_eq_id_2
, &cq_create
->u
.request
.context
,
15460 bf_set(lpfc_cq_context_autovalid
, &cq_create
->u
.request
.context
,
15461 phba
->sli4_hba
.pc_sli4_params
.cqav
);
15463 bf_set(lpfc_cq_eq_id
, &cq_create
->u
.request
.context
,
15466 switch (cq
->entry_count
) {
15469 if (phba
->sli4_hba
.pc_sli4_params
.cqv
==
15470 LPFC_Q_CREATE_VERSION_2
) {
15471 cq_create
->u
.request
.context
.lpfc_cq_context_count
=
15473 bf_set(lpfc_cq_context_count
,
15474 &cq_create
->u
.request
.context
,
15475 LPFC_CQ_CNT_WORD7
);
15480 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
15481 "0361 Unsupported CQ count: "
15482 "entry cnt %d sz %d pg cnt %d\n",
15483 cq
->entry_count
, cq
->entry_size
,
15485 if (cq
->entry_count
< 256) {
15489 fallthrough
; /* otherwise default to smallest count */
15491 bf_set(lpfc_cq_context_count
, &cq_create
->u
.request
.context
,
15495 bf_set(lpfc_cq_context_count
, &cq_create
->u
.request
.context
,
15499 bf_set(lpfc_cq_context_count
, &cq_create
->u
.request
.context
,
15503 list_for_each_entry(dmabuf
, &cq
->page_list
, list
) {
15504 memset(dmabuf
->virt
, 0, cq
->page_size
);
15505 cq_create
->u
.request
.page
[dmabuf
->buffer_tag
].addr_lo
=
15506 putPaddrLow(dmabuf
->phys
);
15507 cq_create
->u
.request
.page
[dmabuf
->buffer_tag
].addr_hi
=
15508 putPaddrHigh(dmabuf
->phys
);
15510 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
15512 /* The IOCTL status is embedded in the mailbox subheader. */
15513 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
15514 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
15515 if (shdr_status
|| shdr_add_status
|| rc
) {
15516 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
15517 "2501 CQ_CREATE mailbox failed with "
15518 "status x%x add_status x%x, mbx status x%x\n",
15519 shdr_status
, shdr_add_status
, rc
);
15523 cq
->queue_id
= bf_get(lpfc_mbx_cq_create_q_id
, &cq_create
->u
.response
);
15524 if (cq
->queue_id
== 0xFFFF) {
15528 /* link the cq onto the parent eq child list */
15529 list_add_tail(&cq
->list
, &eq
->child_list
);
15530 /* Set up completion queue's type and subtype */
15532 cq
->subtype
= subtype
;
15533 cq
->queue_id
= bf_get(lpfc_mbx_cq_create_q_id
, &cq_create
->u
.response
);
15534 cq
->assoc_qid
= eq
->queue_id
;
15536 cq
->host_index
= 0;
15537 cq
->notify_interval
= LPFC_CQ_NOTIFY_INTRVL
;
15538 cq
->max_proc_limit
= min(phba
->cfg_cq_max_proc_limit
, cq
->entry_count
);
15540 if (cq
->queue_id
> phba
->sli4_hba
.cq_max
)
15541 phba
->sli4_hba
.cq_max
= cq
->queue_id
;
15543 irq_poll_init(&cq
->iop
, LPFC_IRQ_POLL_WEIGHT
, lpfc_cq_poll_hdler
);
15545 mempool_free(mbox
, phba
->mbox_mem_pool
);
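
/*
 * Illustrative sketch (not part of the driver): the alloc/create pairing for
 * a completion queue bound to an existing event queue.  The 16-byte entry
 * size, 1024-entry count and the LPFC_WCQ/LPFC_IO type/subtype values are
 * illustrative assumptions; the real values come from the port parameters
 * used by the initialization code.
 */
static int __maybe_unused
lpfc_example_create_wcq(struct lpfc_hba *phba, struct lpfc_queue *eq, int cpu)
{
	struct lpfc_queue *cq;
	int rc;

	cq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, 16, 1024, cpu);
	if (!cq)
		return -ENOMEM;

	/* bind the new WCQ to @eq; its completions will be signalled there */
	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO);
	if (rc)
		lpfc_sli4_queue_free(cq);
	return rc;
}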
/**
 * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @cqp: The queue structure array to use to create the completion queues.
 * @hdwq: The hardware queue array with the EQ to bind completion queues to.
 * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a set of completion queues to support MRQ, as
 * detailed in @cqp, on a port described by @phba, by sending a
 * CREATE_CQ_SET mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cqp array
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for each queue. The EQ
 * in each @hdwq entry indicates which event queue to bind that completion
 * queue to. This function will send the CREATE_CQ_SET mailbox command to the
 * HBA to setup the completion queue set. The mailbox command is issued in
 * polled mode, so this function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
15574 lpfc_cq_create_set(struct lpfc_hba
*phba
, struct lpfc_queue
**cqp
,
15575 struct lpfc_sli4_hdw_queue
*hdwq
, uint32_t type
,
15578 struct lpfc_queue
*cq
;
15579 struct lpfc_queue
*eq
;
15580 struct lpfc_mbx_cq_create_set
*cq_set
;
15581 struct lpfc_dmabuf
*dmabuf
;
15582 LPFC_MBOXQ_t
*mbox
;
15583 int rc
, length
, alloclen
, status
= 0;
15584 int cnt
, idx
, numcq
, page_idx
= 0;
15585 uint32_t shdr_status
, shdr_add_status
;
15586 union lpfc_sli4_cfg_shdr
*shdr
;
15587 uint32_t hw_page_size
= phba
->sli4_hba
.pc_sli4_params
.if_page_sz
;
15589 /* sanity check on queue memory */
15590 numcq
= phba
->cfg_nvmet_mrq
;
15591 if (!cqp
|| !hdwq
|| !numcq
)
15594 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
15598 length
= sizeof(struct lpfc_mbx_cq_create_set
);
15599 length
+= ((numcq
* cqp
[0]->page_count
) *
15600 sizeof(struct dma_address
));
15601 alloclen
= lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_FCOE
,
15602 LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET
, length
,
15603 LPFC_SLI4_MBX_NEMBED
);
15604 if (alloclen
< length
) {
15605 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
15606 "3098 Allocated DMA memory size (%d) is "
15607 "less than the requested DMA memory size "
15608 "(%d)\n", alloclen
, length
);
15612 cq_set
= mbox
->sge_array
->addr
[0];
15613 shdr
= (union lpfc_sli4_cfg_shdr
*)&cq_set
->cfg_shdr
;
15614 bf_set(lpfc_mbox_hdr_version
, &shdr
->request
, 0);
15616 for (idx
= 0; idx
< numcq
; idx
++) {
15618 eq
= hdwq
[idx
].hba_eq
;
15623 if (!phba
->sli4_hba
.pc_sli4_params
.supported
)
15624 hw_page_size
= cq
->page_size
;
15628 bf_set(lpfc_mbx_cq_create_set_page_size
,
15629 &cq_set
->u
.request
,
15630 (hw_page_size
/ SLI4_PAGE_SIZE
));
15631 bf_set(lpfc_mbx_cq_create_set_num_pages
,
15632 &cq_set
->u
.request
, cq
->page_count
);
15633 bf_set(lpfc_mbx_cq_create_set_evt
,
15634 &cq_set
->u
.request
, 1);
15635 bf_set(lpfc_mbx_cq_create_set_valid
,
15636 &cq_set
->u
.request
, 1);
15637 bf_set(lpfc_mbx_cq_create_set_cqe_size
,
15638 &cq_set
->u
.request
, 0);
15639 bf_set(lpfc_mbx_cq_create_set_num_cq
,
15640 &cq_set
->u
.request
, numcq
);
15641 bf_set(lpfc_mbx_cq_create_set_autovalid
,
15642 &cq_set
->u
.request
,
15643 phba
->sli4_hba
.pc_sli4_params
.cqav
);
15644 switch (cq
->entry_count
) {
15647 if (phba
->sli4_hba
.pc_sli4_params
.cqv
==
15648 LPFC_Q_CREATE_VERSION_2
) {
15649 bf_set(lpfc_mbx_cq_create_set_cqe_cnt
,
15650 &cq_set
->u
.request
,
15652 bf_set(lpfc_mbx_cq_create_set_cqe_cnt
,
15653 &cq_set
->u
.request
,
15654 LPFC_CQ_CNT_WORD7
);
15659 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
15660 "3118 Bad CQ count. (%d)\n",
15662 if (cq
->entry_count
< 256) {
15666 fallthrough
; /* otherwise default to smallest */
15668 bf_set(lpfc_mbx_cq_create_set_cqe_cnt
,
15669 &cq_set
->u
.request
, LPFC_CQ_CNT_256
);
15672 bf_set(lpfc_mbx_cq_create_set_cqe_cnt
,
15673 &cq_set
->u
.request
, LPFC_CQ_CNT_512
);
15676 bf_set(lpfc_mbx_cq_create_set_cqe_cnt
,
15677 &cq_set
->u
.request
, LPFC_CQ_CNT_1024
);
15680 bf_set(lpfc_mbx_cq_create_set_eq_id0
,
15681 &cq_set
->u
.request
, eq
->queue_id
);
15684 bf_set(lpfc_mbx_cq_create_set_eq_id1
,
15685 &cq_set
->u
.request
, eq
->queue_id
);
15688 bf_set(lpfc_mbx_cq_create_set_eq_id2
,
15689 &cq_set
->u
.request
, eq
->queue_id
);
15692 bf_set(lpfc_mbx_cq_create_set_eq_id3
,
15693 &cq_set
->u
.request
, eq
->queue_id
);
15696 bf_set(lpfc_mbx_cq_create_set_eq_id4
,
15697 &cq_set
->u
.request
, eq
->queue_id
);
15700 bf_set(lpfc_mbx_cq_create_set_eq_id5
,
15701 &cq_set
->u
.request
, eq
->queue_id
);
15704 bf_set(lpfc_mbx_cq_create_set_eq_id6
,
15705 &cq_set
->u
.request
, eq
->queue_id
);
15708 bf_set(lpfc_mbx_cq_create_set_eq_id7
,
15709 &cq_set
->u
.request
, eq
->queue_id
);
15712 bf_set(lpfc_mbx_cq_create_set_eq_id8
,
15713 &cq_set
->u
.request
, eq
->queue_id
);
15716 bf_set(lpfc_mbx_cq_create_set_eq_id9
,
15717 &cq_set
->u
.request
, eq
->queue_id
);
15720 bf_set(lpfc_mbx_cq_create_set_eq_id10
,
15721 &cq_set
->u
.request
, eq
->queue_id
);
15724 bf_set(lpfc_mbx_cq_create_set_eq_id11
,
15725 &cq_set
->u
.request
, eq
->queue_id
);
15728 bf_set(lpfc_mbx_cq_create_set_eq_id12
,
15729 &cq_set
->u
.request
, eq
->queue_id
);
15732 bf_set(lpfc_mbx_cq_create_set_eq_id13
,
15733 &cq_set
->u
.request
, eq
->queue_id
);
15736 bf_set(lpfc_mbx_cq_create_set_eq_id14
,
15737 &cq_set
->u
.request
, eq
->queue_id
);
15740 bf_set(lpfc_mbx_cq_create_set_eq_id15
,
15741 &cq_set
->u
.request
, eq
->queue_id
);
15745 /* link the cq onto the parent eq child list */
15746 list_add_tail(&cq
->list
, &eq
->child_list
);
15747 /* Set up completion queue's type and subtype */
15749 cq
->subtype
= subtype
;
15750 cq
->assoc_qid
= eq
->queue_id
;
15752 cq
->host_index
= 0;
15753 cq
->notify_interval
= LPFC_CQ_NOTIFY_INTRVL
;
15754 cq
->max_proc_limit
= min(phba
->cfg_cq_max_proc_limit
,
15759 list_for_each_entry(dmabuf
, &cq
->page_list
, list
) {
15760 memset(dmabuf
->virt
, 0, hw_page_size
);
15761 cnt
= page_idx
+ dmabuf
->buffer_tag
;
15762 cq_set
->u
.request
.page
[cnt
].addr_lo
=
15763 putPaddrLow(dmabuf
->phys
);
15764 cq_set
->u
.request
.page
[cnt
].addr_hi
=
15765 putPaddrHigh(dmabuf
->phys
);
15771 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
15773 /* The IOCTL status is embedded in the mailbox subheader. */
15774 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
15775 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
15776 if (shdr_status
|| shdr_add_status
|| rc
) {
15777 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
15778 "3119 CQ_CREATE_SET mailbox failed with "
15779 "status x%x add_status x%x, mbx status x%x\n",
15780 shdr_status
, shdr_add_status
, rc
);
15784 rc
= bf_get(lpfc_mbx_cq_create_set_base_id
, &cq_set
->u
.response
);
15785 if (rc
== 0xFFFF) {
15790 for (idx
= 0; idx
< numcq
; idx
++) {
15792 cq
->queue_id
= rc
+ idx
;
15793 if (cq
->queue_id
> phba
->sli4_hba
.cq_max
)
15794 phba
->sli4_hba
.cq_max
= cq
->queue_id
;
15798 lpfc_sli4_mbox_cmd_free(phba
, mbox
);
/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides fallback (fb) functionality when the
 * mq_create_ext fails on older FW generations.  Its purpose is identical
 * to mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
15817 lpfc_mq_create_fb_init(struct lpfc_hba
*phba
, struct lpfc_queue
*mq
,
15818 LPFC_MBOXQ_t
*mbox
, struct lpfc_queue
*cq
)
15820 struct lpfc_mbx_mq_create
*mq_create
;
15821 struct lpfc_dmabuf
*dmabuf
;
15824 length
= (sizeof(struct lpfc_mbx_mq_create
) -
15825 sizeof(struct lpfc_sli4_cfg_mhdr
));
15826 lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_COMMON
,
15827 LPFC_MBOX_OPCODE_MQ_CREATE
,
15828 length
, LPFC_SLI4_MBX_EMBED
);
15829 mq_create
= &mbox
->u
.mqe
.un
.mq_create
;
15830 bf_set(lpfc_mbx_mq_create_num_pages
, &mq_create
->u
.request
,
15832 bf_set(lpfc_mq_context_cq_id
, &mq_create
->u
.request
.context
,
15834 bf_set(lpfc_mq_context_valid
, &mq_create
->u
.request
.context
, 1);
15835 switch (mq
->entry_count
) {
15837 bf_set(lpfc_mq_context_ring_size
, &mq_create
->u
.request
.context
,
15838 LPFC_MQ_RING_SIZE_16
);
15841 bf_set(lpfc_mq_context_ring_size
, &mq_create
->u
.request
.context
,
15842 LPFC_MQ_RING_SIZE_32
);
15845 bf_set(lpfc_mq_context_ring_size
, &mq_create
->u
.request
.context
,
15846 LPFC_MQ_RING_SIZE_64
);
15849 bf_set(lpfc_mq_context_ring_size
, &mq_create
->u
.request
.context
,
15850 LPFC_MQ_RING_SIZE_128
);
15853 list_for_each_entry(dmabuf
, &mq
->page_list
, list
) {
15854 mq_create
->u
.request
.page
[dmabuf
->buffer_tag
].addr_lo
=
15855 putPaddrLow(dmabuf
->phys
);
15856 mq_create
->u
.request
.page
[dmabuf
->buffer_tag
].addr_hi
=
15857 putPaddrHigh(dmabuf
->phys
);
/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 * mailbox queue. The mailbox command is issued in polled mode, so this
 * function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
15883 lpfc_mq_create(struct lpfc_hba
*phba
, struct lpfc_queue
*mq
,
15884 struct lpfc_queue
*cq
, uint32_t subtype
)
15886 struct lpfc_mbx_mq_create
*mq_create
;
15887 struct lpfc_mbx_mq_create_ext
*mq_create_ext
;
15888 struct lpfc_dmabuf
*dmabuf
;
15889 LPFC_MBOXQ_t
*mbox
;
15890 int rc
, length
, status
= 0;
15891 uint32_t shdr_status
, shdr_add_status
;
15892 union lpfc_sli4_cfg_shdr
*shdr
;
15893 uint32_t hw_page_size
= phba
->sli4_hba
.pc_sli4_params
.if_page_sz
;
15895 /* sanity check on queue memory */
15898 if (!phba
->sli4_hba
.pc_sli4_params
.supported
)
15899 hw_page_size
= SLI4_PAGE_SIZE
;
15901 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
15904 length
= (sizeof(struct lpfc_mbx_mq_create_ext
) -
15905 sizeof(struct lpfc_sli4_cfg_mhdr
));
15906 lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_COMMON
,
15907 LPFC_MBOX_OPCODE_MQ_CREATE_EXT
,
15908 length
, LPFC_SLI4_MBX_EMBED
);
15910 mq_create_ext
= &mbox
->u
.mqe
.un
.mq_create_ext
;
15911 shdr
= (union lpfc_sli4_cfg_shdr
*) &mq_create_ext
->header
.cfg_shdr
;
15912 bf_set(lpfc_mbx_mq_create_ext_num_pages
,
15913 &mq_create_ext
->u
.request
, mq
->page_count
);
15914 bf_set(lpfc_mbx_mq_create_ext_async_evt_link
,
15915 &mq_create_ext
->u
.request
, 1);
15916 bf_set(lpfc_mbx_mq_create_ext_async_evt_fip
,
15917 &mq_create_ext
->u
.request
, 1);
15918 bf_set(lpfc_mbx_mq_create_ext_async_evt_group5
,
15919 &mq_create_ext
->u
.request
, 1);
15920 bf_set(lpfc_mbx_mq_create_ext_async_evt_fc
,
15921 &mq_create_ext
->u
.request
, 1);
15922 bf_set(lpfc_mbx_mq_create_ext_async_evt_sli
,
15923 &mq_create_ext
->u
.request
, 1);
15924 bf_set(lpfc_mq_context_valid
, &mq_create_ext
->u
.request
.context
, 1);
15925 bf_set(lpfc_mbox_hdr_version
, &shdr
->request
,
15926 phba
->sli4_hba
.pc_sli4_params
.mqv
);
15927 if (phba
->sli4_hba
.pc_sli4_params
.mqv
== LPFC_Q_CREATE_VERSION_1
)
15928 bf_set(lpfc_mbx_mq_create_ext_cq_id
, &mq_create_ext
->u
.request
,
15931 bf_set(lpfc_mq_context_cq_id
, &mq_create_ext
->u
.request
.context
,
15933 switch (mq
->entry_count
) {
15935 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
15936 "0362 Unsupported MQ count. (%d)\n",
15938 if (mq
->entry_count
< 16) {
15942 fallthrough
; /* otherwise default to smallest count */
15944 bf_set(lpfc_mq_context_ring_size
,
15945 &mq_create_ext
->u
.request
.context
,
15946 LPFC_MQ_RING_SIZE_16
);
15949 bf_set(lpfc_mq_context_ring_size
,
15950 &mq_create_ext
->u
.request
.context
,
15951 LPFC_MQ_RING_SIZE_32
);
15954 bf_set(lpfc_mq_context_ring_size
,
15955 &mq_create_ext
->u
.request
.context
,
15956 LPFC_MQ_RING_SIZE_64
);
15959 bf_set(lpfc_mq_context_ring_size
,
15960 &mq_create_ext
->u
.request
.context
,
15961 LPFC_MQ_RING_SIZE_128
);
15964 list_for_each_entry(dmabuf
, &mq
->page_list
, list
) {
15965 memset(dmabuf
->virt
, 0, hw_page_size
);
15966 mq_create_ext
->u
.request
.page
[dmabuf
->buffer_tag
].addr_lo
=
15967 putPaddrLow(dmabuf
->phys
);
15968 mq_create_ext
->u
.request
.page
[dmabuf
->buffer_tag
].addr_hi
=
15969 putPaddrHigh(dmabuf
->phys
);
15971 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
15972 mq
->queue_id
= bf_get(lpfc_mbx_mq_create_q_id
,
15973 &mq_create_ext
->u
.response
);
15974 if (rc
!= MBX_SUCCESS
) {
15975 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
15976 "2795 MQ_CREATE_EXT failed with "
15977 "status x%x. Failback to MQ_CREATE.\n",
15979 lpfc_mq_create_fb_init(phba
, mq
, mbox
, cq
);
15980 mq_create
= &mbox
->u
.mqe
.un
.mq_create
;
15981 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
15982 shdr
= (union lpfc_sli4_cfg_shdr
*) &mq_create
->header
.cfg_shdr
;
15983 mq
->queue_id
= bf_get(lpfc_mbx_mq_create_q_id
,
15984 &mq_create
->u
.response
);
15987 /* The IOCTL status is embedded in the mailbox subheader. */
15988 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
15989 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
15990 if (shdr_status
|| shdr_add_status
|| rc
) {
15991 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
15992 "2502 MQ_CREATE mailbox failed with "
15993 "status x%x add_status x%x, mbx status x%x\n",
15994 shdr_status
, shdr_add_status
, rc
);
15998 if (mq
->queue_id
== 0xFFFF) {
16002 mq
->type
= LPFC_MQ
;
16003 mq
->assoc_qid
= cq
->queue_id
;
16004 mq
->subtype
= subtype
;
16005 mq
->host_index
= 0;
16008 /* link the mq onto the parent cq child list */
16009 list_add_tail(&mq
->list
, &cq
->child_list
);
16011 mempool_free(mbox
, phba
->mbox_mem_pool
);
/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @wq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. This
 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 * work queue. The mailbox command is issued in polled mode, so this
 * function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
16038 lpfc_wq_create(struct lpfc_hba
*phba
, struct lpfc_queue
*wq
,
16039 struct lpfc_queue
*cq
, uint32_t subtype
)
16041 struct lpfc_mbx_wq_create
*wq_create
;
16042 struct lpfc_dmabuf
*dmabuf
;
16043 LPFC_MBOXQ_t
*mbox
;
16044 int rc
, length
, status
= 0;
16045 uint32_t shdr_status
, shdr_add_status
;
16046 union lpfc_sli4_cfg_shdr
*shdr
;
16047 uint32_t hw_page_size
= phba
->sli4_hba
.pc_sli4_params
.if_page_sz
;
16048 struct dma_address
*page
;
16049 void __iomem
*bar_memmap_p
;
16050 uint32_t db_offset
;
16051 uint16_t pci_barset
;
16052 uint8_t dpp_barset
;
16053 uint32_t dpp_offset
;
16054 uint8_t wq_create_version
;
16056 unsigned long pg_addr
;
16059 /* sanity check on queue memory */
16062 if (!phba
->sli4_hba
.pc_sli4_params
.supported
)
16063 hw_page_size
= wq
->page_size
;
16065 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
16068 length
= (sizeof(struct lpfc_mbx_wq_create
) -
16069 sizeof(struct lpfc_sli4_cfg_mhdr
));
16070 lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_FCOE
,
16071 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE
,
16072 length
, LPFC_SLI4_MBX_EMBED
);
16073 wq_create
= &mbox
->u
.mqe
.un
.wq_create
;
16074 shdr
= (union lpfc_sli4_cfg_shdr
*) &wq_create
->header
.cfg_shdr
;
16075 bf_set(lpfc_mbx_wq_create_num_pages
, &wq_create
->u
.request
,
16077 bf_set(lpfc_mbx_wq_create_cq_id
, &wq_create
->u
.request
,
16080 /* wqv is the earliest version supported, NOT the latest */
16081 bf_set(lpfc_mbox_hdr_version
, &shdr
->request
,
16082 phba
->sli4_hba
.pc_sli4_params
.wqv
);
16084 if ((phba
->sli4_hba
.pc_sli4_params
.wqsize
& LPFC_WQ_SZ128_SUPPORT
) ||
16085 (wq
->page_size
> SLI4_PAGE_SIZE
))
16086 wq_create_version
= LPFC_Q_CREATE_VERSION_1
;
16088 wq_create_version
= LPFC_Q_CREATE_VERSION_0
;
16090 switch (wq_create_version
) {
16091 case LPFC_Q_CREATE_VERSION_1
:
16092 bf_set(lpfc_mbx_wq_create_wqe_count
, &wq_create
->u
.request_1
,
16094 bf_set(lpfc_mbox_hdr_version
, &shdr
->request
,
16095 LPFC_Q_CREATE_VERSION_1
);
16097 switch (wq
->entry_size
) {
16100 bf_set(lpfc_mbx_wq_create_wqe_size
,
16101 &wq_create
->u
.request_1
,
16102 LPFC_WQ_WQE_SIZE_64
);
16105 bf_set(lpfc_mbx_wq_create_wqe_size
,
16106 &wq_create
->u
.request_1
,
16107 LPFC_WQ_WQE_SIZE_128
);
16110 /* Request DPP by default */
16111 bf_set(lpfc_mbx_wq_create_dpp_req
, &wq_create
->u
.request_1
, 1);
16112 bf_set(lpfc_mbx_wq_create_page_size
,
16113 &wq_create
->u
.request_1
,
16114 (wq
->page_size
/ SLI4_PAGE_SIZE
));
16115 page
= wq_create
->u
.request_1
.page
;
16118 page
= wq_create
->u
.request
.page
;
16122 list_for_each_entry(dmabuf
, &wq
->page_list
, list
) {
16123 memset(dmabuf
->virt
, 0, hw_page_size
);
16124 page
[dmabuf
->buffer_tag
].addr_lo
= putPaddrLow(dmabuf
->phys
);
16125 page
[dmabuf
->buffer_tag
].addr_hi
= putPaddrHigh(dmabuf
->phys
);
16128 if (phba
->sli4_hba
.fw_func_mode
& LPFC_DUA_MODE
)
16129 bf_set(lpfc_mbx_wq_create_dua
, &wq_create
->u
.request
, 1);
16131 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
16132 /* The IOCTL status is embedded in the mailbox subheader. */
16133 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
16134 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
16135 if (shdr_status
|| shdr_add_status
|| rc
) {
16136 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16137 "2503 WQ_CREATE mailbox failed with "
16138 "status x%x add_status x%x, mbx status x%x\n",
16139 shdr_status
, shdr_add_status
, rc
);
16144 if (wq_create_version
== LPFC_Q_CREATE_VERSION_0
)
16145 wq
->queue_id
= bf_get(lpfc_mbx_wq_create_q_id
,
16146 &wq_create
->u
.response
);
16148 wq
->queue_id
= bf_get(lpfc_mbx_wq_create_v1_q_id
,
16149 &wq_create
->u
.response_1
);
16151 if (wq
->queue_id
== 0xFFFF) {
16156 wq
->db_format
= LPFC_DB_LIST_FORMAT
;
16157 if (wq_create_version
== LPFC_Q_CREATE_VERSION_0
) {
16158 if (phba
->sli4_hba
.fw_func_mode
& LPFC_DUA_MODE
) {
16159 wq
->db_format
= bf_get(lpfc_mbx_wq_create_db_format
,
16160 &wq_create
->u
.response
);
16161 if ((wq
->db_format
!= LPFC_DB_LIST_FORMAT
) &&
16162 (wq
->db_format
!= LPFC_DB_RING_FORMAT
)) {
16163 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16164 "3265 WQ[%d] doorbell format "
16165 "not supported: x%x\n",
16166 wq
->queue_id
, wq
->db_format
);
16170 pci_barset
= bf_get(lpfc_mbx_wq_create_bar_set
,
16171 &wq_create
->u
.response
);
16172 bar_memmap_p
= lpfc_dual_chute_pci_bar_map(phba
,
16174 if (!bar_memmap_p
) {
16175 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16176 "3263 WQ[%d] failed to memmap "
16177 "pci barset:x%x\n",
16178 wq
->queue_id
, pci_barset
);
16182 db_offset
= wq_create
->u
.response
.doorbell_offset
;
16183 if ((db_offset
!= LPFC_ULP0_WQ_DOORBELL
) &&
16184 (db_offset
!= LPFC_ULP1_WQ_DOORBELL
)) {
16185 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16186 "3252 WQ[%d] doorbell offset "
16187 "not supported: x%x\n",
16188 wq
->queue_id
, db_offset
);
16192 wq
->db_regaddr
= bar_memmap_p
+ db_offset
;
16193 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
16194 "3264 WQ[%d]: barset:x%x, offset:x%x, "
16195 "format:x%x\n", wq
->queue_id
,
16196 pci_barset
, db_offset
, wq
->db_format
);
16198 wq
->db_regaddr
= phba
->sli4_hba
.WQDBregaddr
;
16200 /* Check if DPP was honored by the firmware */
16201 wq
->dpp_enable
= bf_get(lpfc_mbx_wq_create_dpp_rsp
,
16202 &wq_create
->u
.response_1
);
16203 if (wq
->dpp_enable
) {
16204 pci_barset
= bf_get(lpfc_mbx_wq_create_v1_bar_set
,
16205 &wq_create
->u
.response_1
);
16206 bar_memmap_p
= lpfc_dual_chute_pci_bar_map(phba
,
16208 if (!bar_memmap_p
) {
16209 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16210 "3267 WQ[%d] failed to memmap "
16211 "pci barset:x%x\n",
16212 wq
->queue_id
, pci_barset
);
16216 db_offset
= wq_create
->u
.response_1
.doorbell_offset
;
16217 wq
->db_regaddr
= bar_memmap_p
+ db_offset
;
16218 wq
->dpp_id
= bf_get(lpfc_mbx_wq_create_dpp_id
,
16219 &wq_create
->u
.response_1
);
16220 dpp_barset
= bf_get(lpfc_mbx_wq_create_dpp_bar
,
16221 &wq_create
->u
.response_1
);
16222 bar_memmap_p
= lpfc_dual_chute_pci_bar_map(phba
,
16224 if (!bar_memmap_p
) {
16225 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16226 "3268 WQ[%d] failed to memmap "
16227 "pci barset:x%x\n",
16228 wq
->queue_id
, dpp_barset
);
16232 dpp_offset
= wq_create
->u
.response_1
.dpp_offset
;
16233 wq
->dpp_regaddr
= bar_memmap_p
+ dpp_offset
;
16234 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
16235 "3271 WQ[%d]: barset:x%x, offset:x%x, "
16236 "dpp_id:x%x dpp_barset:x%x "
16237 "dpp_offset:x%x\n",
16238 wq
->queue_id
, pci_barset
, db_offset
,
16239 wq
->dpp_id
, dpp_barset
, dpp_offset
);
16242 /* Enable combined writes for DPP aperture */
16243 pg_addr
= (unsigned long)(wq
->dpp_regaddr
) & PAGE_MASK
;
16244 rc
= set_memory_wc(pg_addr
, 1);
16246 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
16247 "3272 Cannot setup Combined "
16248 "Write on WQ[%d] - disable DPP\n",
16250 phba
->cfg_enable_dpp
= 0;
16253 phba
->cfg_enable_dpp
= 0;
16256 wq
->db_regaddr
= phba
->sli4_hba
.WQDBregaddr
;
16258 wq
->pring
= kzalloc(sizeof(struct lpfc_sli_ring
), GFP_KERNEL
);
16259 if (wq
->pring
== NULL
) {
16263 wq
->type
= LPFC_WQ
;
16264 wq
->assoc_qid
= cq
->queue_id
;
16265 wq
->subtype
= subtype
;
16266 wq
->host_index
= 0;
16268 wq
->notify_interval
= LPFC_WQ_NOTIFY_INTRVL
;
16270 /* link the wq onto the parent cq child list */
16271 list_add_tail(&wq
->list
, &cq
->child_list
);
16273 mempool_free(mbox
, phba
->mbox_mem_pool
);
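
/*
 * Illustrative sketch (not part of the driver): creating a work queue
 * against an existing completion queue.  The 128-byte WQE size, 256-entry
 * count and LPFC_IO subtype are illustrative assumptions; the driver sizes
 * its WQs from the port capabilities (for example, 128-byte WQEs only when
 * LPFC_WQ_SZ128_SUPPORT is reported).
 */
static int __maybe_unused
lpfc_example_create_wq(struct lpfc_hba *phba, struct lpfc_queue *cq, int cpu)
{
	struct lpfc_queue *wq;
	int rc;

	wq = lpfc_sli4_queue_alloc(phba, SLI4_PAGE_SIZE, 128, 256, cpu);
	if (!wq)
		return -ENOMEM;

	/* all work posted to @wq completes on @cq */
	rc = lpfc_wq_create(phba, wq, cq, LPFC_IO);
	if (rc)
		lpfc_sli4_queue_free(wq);
	return rc;
}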
/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
 * structs are used to get the entry count that is necessary to determine the
 * number of pages to use for this queue. The @cq is used to indicate which
 * completion queue to bind received buffers that are posted to these queues to.
 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
 * receive queue pair. The mailbox command is issued in polled mode, so this
 * function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
16302 lpfc_rq_create(struct lpfc_hba
*phba
, struct lpfc_queue
*hrq
,
16303 struct lpfc_queue
*drq
, struct lpfc_queue
*cq
, uint32_t subtype
)
16305 struct lpfc_mbx_rq_create
*rq_create
;
16306 struct lpfc_dmabuf
*dmabuf
;
16307 LPFC_MBOXQ_t
*mbox
;
16308 int rc
, length
, status
= 0;
16309 uint32_t shdr_status
, shdr_add_status
;
16310 union lpfc_sli4_cfg_shdr
*shdr
;
16311 uint32_t hw_page_size
= phba
->sli4_hba
.pc_sli4_params
.if_page_sz
;
16312 void __iomem
*bar_memmap_p
;
16313 uint32_t db_offset
;
16314 uint16_t pci_barset
;
16316 /* sanity check on queue memory */
16317 if (!hrq
|| !drq
|| !cq
)
16319 if (!phba
->sli4_hba
.pc_sli4_params
.supported
)
16320 hw_page_size
= SLI4_PAGE_SIZE
;
16322 if (hrq
->entry_count
!= drq
->entry_count
)
16324 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
16327 length
= (sizeof(struct lpfc_mbx_rq_create
) -
16328 sizeof(struct lpfc_sli4_cfg_mhdr
));
16329 lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_FCOE
,
16330 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE
,
16331 length
, LPFC_SLI4_MBX_EMBED
);
16332 rq_create
= &mbox
->u
.mqe
.un
.rq_create
;
16333 shdr
= (union lpfc_sli4_cfg_shdr
*) &rq_create
->header
.cfg_shdr
;
16334 bf_set(lpfc_mbox_hdr_version
, &shdr
->request
,
16335 phba
->sli4_hba
.pc_sli4_params
.rqv
);
16336 if (phba
->sli4_hba
.pc_sli4_params
.rqv
== LPFC_Q_CREATE_VERSION_1
) {
16337 bf_set(lpfc_rq_context_rqe_count_1
,
16338 &rq_create
->u
.request
.context
,
16340 rq_create
->u
.request
.context
.buffer_size
= LPFC_HDR_BUF_SIZE
;
16341 bf_set(lpfc_rq_context_rqe_size
,
16342 &rq_create
->u
.request
.context
,
16344 bf_set(lpfc_rq_context_page_size
,
16345 &rq_create
->u
.request
.context
,
16346 LPFC_RQ_PAGE_SIZE_4096
);
16348 switch (hrq
->entry_count
) {
16350 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16351 "2535 Unsupported RQ count. (%d)\n",
16353 if (hrq
->entry_count
< 512) {
16357 fallthrough
; /* otherwise default to smallest count */
16359 bf_set(lpfc_rq_context_rqe_count
,
16360 &rq_create
->u
.request
.context
,
16361 LPFC_RQ_RING_SIZE_512
);
16364 bf_set(lpfc_rq_context_rqe_count
,
16365 &rq_create
->u
.request
.context
,
16366 LPFC_RQ_RING_SIZE_1024
);
16369 bf_set(lpfc_rq_context_rqe_count
,
16370 &rq_create
->u
.request
.context
,
16371 LPFC_RQ_RING_SIZE_2048
);
16374 bf_set(lpfc_rq_context_rqe_count
,
16375 &rq_create
->u
.request
.context
,
16376 LPFC_RQ_RING_SIZE_4096
);
16379 bf_set(lpfc_rq_context_buf_size
, &rq_create
->u
.request
.context
,
16380 LPFC_HDR_BUF_SIZE
);
16382 bf_set(lpfc_rq_context_cq_id
, &rq_create
->u
.request
.context
,
16384 bf_set(lpfc_mbx_rq_create_num_pages
, &rq_create
->u
.request
,
16386 list_for_each_entry(dmabuf
, &hrq
->page_list
, list
) {
16387 memset(dmabuf
->virt
, 0, hw_page_size
);
16388 rq_create
->u
.request
.page
[dmabuf
->buffer_tag
].addr_lo
=
16389 putPaddrLow(dmabuf
->phys
);
16390 rq_create
->u
.request
.page
[dmabuf
->buffer_tag
].addr_hi
=
16391 putPaddrHigh(dmabuf
->phys
);
16393 if (phba
->sli4_hba
.fw_func_mode
& LPFC_DUA_MODE
)
16394 bf_set(lpfc_mbx_rq_create_dua
, &rq_create
->u
.request
, 1);
16396 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
16397 /* The IOCTL status is embedded in the mailbox subheader. */
16398 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
16399 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
16400 if (shdr_status
|| shdr_add_status
|| rc
) {
16401 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16402 "2504 RQ_CREATE mailbox failed with "
16403 "status x%x add_status x%x, mbx status x%x\n",
16404 shdr_status
, shdr_add_status
, rc
);
16408 hrq
->queue_id
= bf_get(lpfc_mbx_rq_create_q_id
, &rq_create
->u
.response
);
16409 if (hrq
->queue_id
== 0xFFFF) {
16414 if (phba
->sli4_hba
.fw_func_mode
& LPFC_DUA_MODE
) {
16415 hrq
->db_format
= bf_get(lpfc_mbx_rq_create_db_format
,
16416 &rq_create
->u
.response
);
16417 if ((hrq
->db_format
!= LPFC_DB_LIST_FORMAT
) &&
16418 (hrq
->db_format
!= LPFC_DB_RING_FORMAT
)) {
16419 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16420 "3262 RQ [%d] doorbell format not "
16421 "supported: x%x\n", hrq
->queue_id
,
16427 pci_barset
= bf_get(lpfc_mbx_rq_create_bar_set
,
16428 &rq_create
->u
.response
);
16429 bar_memmap_p
= lpfc_dual_chute_pci_bar_map(phba
, pci_barset
);
16430 if (!bar_memmap_p
) {
16431 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16432 "3269 RQ[%d] failed to memmap pci "
16433 "barset:x%x\n", hrq
->queue_id
,
16439 db_offset
= rq_create
->u
.response
.doorbell_offset
;
16440 if ((db_offset
!= LPFC_ULP0_RQ_DOORBELL
) &&
16441 (db_offset
!= LPFC_ULP1_RQ_DOORBELL
)) {
16442 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16443 "3270 RQ[%d] doorbell offset not "
16444 "supported: x%x\n", hrq
->queue_id
,
16449 hrq
->db_regaddr
= bar_memmap_p
+ db_offset
;
16450 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
16451 "3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
16452 "format:x%x\n", hrq
->queue_id
, pci_barset
,
16453 db_offset
, hrq
->db_format
);
16455 hrq
->db_format
= LPFC_DB_RING_FORMAT
;
16456 hrq
->db_regaddr
= phba
->sli4_hba
.RQDBregaddr
;
16458 hrq
->type
= LPFC_HRQ
;
16459 hrq
->assoc_qid
= cq
->queue_id
;
16460 hrq
->subtype
= subtype
;
16461 hrq
->host_index
= 0;
16462 hrq
->hba_index
= 0;
16463 hrq
->notify_interval
= LPFC_RQ_NOTIFY_INTRVL
;
16465 /* now create the data queue */
16466 lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_FCOE
,
16467 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE
,
16468 length
, LPFC_SLI4_MBX_EMBED
);
16469 bf_set(lpfc_mbox_hdr_version
, &shdr
->request
,
16470 phba
->sli4_hba
.pc_sli4_params
.rqv
);
16471 if (phba
->sli4_hba
.pc_sli4_params
.rqv
== LPFC_Q_CREATE_VERSION_1
) {
16472 bf_set(lpfc_rq_context_rqe_count_1
,
16473 &rq_create
->u
.request
.context
, hrq
->entry_count
);
16474 if (subtype
== LPFC_NVMET
)
16475 rq_create
->u
.request
.context
.buffer_size
=
16476 LPFC_NVMET_DATA_BUF_SIZE
;
16478 rq_create
->u
.request
.context
.buffer_size
=
16479 LPFC_DATA_BUF_SIZE
;
16480 bf_set(lpfc_rq_context_rqe_size
, &rq_create
->u
.request
.context
,
16482 bf_set(lpfc_rq_context_page_size
, &rq_create
->u
.request
.context
,
16483 (PAGE_SIZE
/SLI4_PAGE_SIZE
));
16485 switch (drq
->entry_count
) {
16487 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
16488 "2536 Unsupported RQ count. (%d)\n",
16490 if (drq
->entry_count
< 512) {
16494 fallthrough
; /* otherwise default to smallest count */
16496 bf_set(lpfc_rq_context_rqe_count
,
16497 &rq_create
->u
.request
.context
,
16498 LPFC_RQ_RING_SIZE_512
);
16501 bf_set(lpfc_rq_context_rqe_count
,
16502 &rq_create
->u
.request
.context
,
16503 LPFC_RQ_RING_SIZE_1024
);
16506 bf_set(lpfc_rq_context_rqe_count
,
16507 &rq_create
->u
.request
.context
,
16508 LPFC_RQ_RING_SIZE_2048
);
16511 bf_set(lpfc_rq_context_rqe_count
,
16512 &rq_create
->u
.request
.context
,
16513 LPFC_RQ_RING_SIZE_4096
);
16516 if (subtype
== LPFC_NVMET
)
16517 bf_set(lpfc_rq_context_buf_size
,
16518 &rq_create
->u
.request
.context
,
16519 LPFC_NVMET_DATA_BUF_SIZE
);
16521 bf_set(lpfc_rq_context_buf_size
,
16522 &rq_create
->u
.request
.context
,
16523 LPFC_DATA_BUF_SIZE
);
16525 bf_set(lpfc_rq_context_cq_id
, &rq_create
->u
.request
.context
,
16527 bf_set(lpfc_mbx_rq_create_num_pages
, &rq_create
->u
.request
,
16529 list_for_each_entry(dmabuf
, &drq
->page_list
, list
) {
16530 rq_create
->u
.request
.page
[dmabuf
->buffer_tag
].addr_lo
=
16531 putPaddrLow(dmabuf
->phys
);
16532 rq_create
->u
.request
.page
[dmabuf
->buffer_tag
].addr_hi
=
16533 putPaddrHigh(dmabuf
->phys
);
16535 if (phba
->sli4_hba
.fw_func_mode
& LPFC_DUA_MODE
)
16536 bf_set(lpfc_mbx_rq_create_dua
, &rq_create
->u
.request
, 1);
16537 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
16538 /* The IOCTL status is embedded in the mailbox subheader. */
16539 shdr
= (union lpfc_sli4_cfg_shdr
*) &rq_create
->header
.cfg_shdr
;
16540 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
16541 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
16542 if (shdr_status
|| shdr_add_status
|| rc
) {
16546 drq
->queue_id
= bf_get(lpfc_mbx_rq_create_q_id
, &rq_create
->u
.response
);
16547 if (drq
->queue_id
== 0xFFFF) {
16551 drq
->type
= LPFC_DRQ
;
16552 drq
->assoc_qid
= cq
->queue_id
;
16553 drq
->subtype
= subtype
;
16554 drq
->host_index
= 0;
16555 drq
->hba_index
= 0;
16556 drq
->notify_interval
= LPFC_RQ_NOTIFY_INTRVL
;
16558 /* link the header and data RQs onto the parent cq child list */
16559 list_add_tail(&hrq
->list
, &cq
->child_list
);
16560 list_add_tail(&drq
->list
, &cq
->child_list
);
16563 mempool_free(mbox
, phba
->mbox_mem_pool
);
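
/*
 * Illustrative sketch (not part of the driver): how the data RQ buffer size
 * chosen above depends on the queue subtype.  LPFC_NVMET is the only subtype
 * that takes the larger NVMET data buffers; every other subtype uses the
 * default data buffer size.
 */
static uint32_t __maybe_unused
lpfc_example_drq_buf_size(uint32_t subtype)
{
	return (subtype == LPFC_NVMET) ? LPFC_NVMET_DATA_BUF_SIZE :
					 LPFC_DATA_BUF_SIZE;
}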
/**
 * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrqp: The queue structure array to use to create the header receive queues.
 * @drqp: The queue structure array to use to create the data receive queues.
 * @cqp: The completion queue array to bind these receive queues to.
 * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
 *
 * This function creates a set of receive buffer queue pairs, as detailed in
 * @hrqp and @drqp, on a port, described by @phba by sending a RQ_CREATE
 * mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drqp and @hrqp
 * arrays are used to get the entry count that is necessary to determine the
 * number of pages to use for each queue. The @cqp array is used to indicate
 * which completion queue to bind received buffers that are posted to these
 * queues to. This function will send the RQ_CREATE mailbox command to the HBA
 * to setup the receive queue pairs. The mailbox command is issued in polled
 * mode, so this function waits for it to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
16592 lpfc_mrq_create(struct lpfc_hba
*phba
, struct lpfc_queue
**hrqp
,
16593 struct lpfc_queue
**drqp
, struct lpfc_queue
**cqp
,
	struct lpfc_queue *hrq, *drq, *cq;
	struct lpfc_mbx_rq_create_v2 *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, alloclen, status = 0;
	int cnt, idx, numrq, page_idx = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	numrq = phba->cfg_nvmet_mrq;
	/* sanity check on array memory */
	if (!hrqp || !drqp || !cqp || !numrq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	length = sizeof(struct lpfc_mbx_rq_create_v2);
	length += ((2 * numrq * hrqp[0]->page_count) *
		   sizeof(struct dma_address));

	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
				    LPFC_SLI4_MBX_NEMBED);
	if (alloclen < length) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3099 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, length);
		status = -ENOMEM;
		goto out;
	}

	rq_create = mbox->sge_array->addr[0];
	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;

	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
	cnt = 0;

	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		drq = drqp[idx];
		cq  = cqp[idx];

		/* sanity check on queue memory */
		if (!hrq || !drq || !cq) {
			status = -ENODEV;
			goto out;
		}

		if (hrq->entry_count != drq->entry_count) {
			status = -EINVAL;
			goto out;
		}

		if (idx == 0) {
			bf_set(lpfc_mbx_rq_create_num_pages,
			       &rq_create->u.request,
			       hrq->page_count);
			bf_set(lpfc_mbx_rq_create_rq_cnt,
			       &rq_create->u.request, (numrq * 2));
			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
			       1);
			bf_set(lpfc_rq_context_base_cq,
			       &rq_create->u.request.context,
			       cq->queue_id);
			bf_set(lpfc_rq_context_data_size,
			       &rq_create->u.request.context,
			       LPFC_NVMET_DATA_BUF_SIZE);
			bf_set(lpfc_rq_context_hdr_size,
			       &rq_create->u.request.context,
			       LPFC_HDR_BUF_SIZE);
			bf_set(lpfc_rq_context_rqe_count_1,
			       &rq_create->u.request.context,
			       hrq->entry_count);
			bf_set(lpfc_rq_context_rqe_size,
			       &rq_create->u.request.context,
			       LPFC_RQE_SIZE_8);
			bf_set(lpfc_rq_context_page_size,
			       &rq_create->u.request.context,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
		}

		rc = 0;
		list_for_each_entry(dmabuf, &hrq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		rc = 0;
		list_for_each_entry(dmabuf, &drq->page_list, list) {
			memset(dmabuf->virt, 0, hw_page_size);
			cnt = page_idx + dmabuf->buffer_tag;
			rq_create->u.request.page[cnt].addr_lo =
					putPaddrLow(dmabuf->phys);
			rq_create->u.request.page[cnt].addr_hi =
					putPaddrHigh(dmabuf->phys);
			rc++;
		}
		page_idx += rc;

		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		hrq->type = LPFC_HRQ;
		hrq->assoc_qid = cq->queue_id;
		hrq->subtype = subtype;
		hrq->host_index = 0;
		hrq->hba_index = 0;
		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		drq->db_format = LPFC_DB_RING_FORMAT;
		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
		drq->type = LPFC_DRQ;
		drq->assoc_qid = cq->queue_id;
		drq->subtype = subtype;
		drq->host_index = 0;
		drq->hba_index = 0;
		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;

		list_add_tail(&hrq->list, &cq->child_list);
		list_add_tail(&drq->list, &cq->child_list);
	}

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"3120 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (rc == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	/* Initialize all RQs with associated queue id */
	for (idx = 0; idx < numrq; idx++) {
		hrq = hrqp[idx];
		hrq->queue_id = rc + (2 * idx);
		drq = drqp[idx];
		drq->queue_id = rc + (2 * idx) + 1;
	}

out:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return status;
}
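
/*
 * Note on MRQ queue-id assignment (illustrative, derived from the loop
 * above): the RQ_CREATE response returns one base queue id and the
 * header/data RQ pairs are numbered consecutively from it.  For example,
 * with phba->cfg_nvmet_mrq == 2 and a returned base id of 100:
 *
 *	hrqp[0]->queue_id = 100;	drqp[0]->queue_id = 101;
 *	hrqp[1]->queue_id = 102;	drqp[1]->queue_id = 103;
 */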
/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;

	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}
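
/*
 * The CQ, MQ, WQ and RQ destroy routines below follow the same shape as
 * lpfc_eq_destroy().  A minimal sketch of the shared flow (illustrative
 * only, not an additional code path; <qtype> stands in for the specific
 * queue type):
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_sli4_config(phba, mbox, <subsystem>, <qtype>_DESTROY,
 *			 length, LPFC_SLI4_MBX_EMBED);
 *	bf_set(lpfc_mbx_<qtype>_destroy_q_id, &request, q->queue_id);
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	check cfg_shdr status/add_status, then list_del_init(&q->list);
 */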
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq)
		return -ENODEV;
	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove cq from any list */
	list_del_init(&cq->list);
	mempool_free(mbox, cq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!wq)
		return -ENODEV;
	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove wq from any list */
	list_del_init(&wq->list);
	kfree(wq->pring);
	wq->pring = NULL;
	mempool_free(mbox, wq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to destroy a queue on.
 * @hrq: The queue structure associated with the queue to destroy.
 * @drq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @hrq and @drq, by sending a
 * mailbox command, specific to the type of queue, to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: The HBA for which this call is being executed.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If you are going to map 2 SGLs then the first one must have 256 entries;
 * the second sgl can have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			sizeof(struct lpfc_mbx_post_sgl_pages) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
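
/*
 * Illustrative call (not an additional code path): posting a single-page
 * SGL for an sglq entry, where the second page address is 0 because fewer
 * than 256 entries are mapped:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq_entry->phys, 0,
 *				sglq_entry->sli4_xritag);
 */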
/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available extended resource
 * index (XRI) from the driver's xri bitmask, consistent with the SLI-4
 * interface spec.
 *
 * Returns
 * The allocated logical xri if successful,
 * NO_XRI if no xris are available.
 **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	uint16_t xri;

	/*
	 * Fetch the next logical xri.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	} else {
		set_bit(xri, phba->sli4_hba.xri_bmask);
		phba->sli4_hba.max_cfg_param.xri_used++;
	}
	spin_unlock_irq(&phba->hbalock);
	return xri;
}
/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}
/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @xri: xri to release.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
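
/*
 * The xri bitmask pairs allocation with release.  A minimal illustrative
 * sequence (assumes the xri is not otherwise in use):
 *
 *	xri = lpfc_sli4_alloc_xri(phba);	sets the bit, bumps xri_used
 *	if (xri != NO_XRI)
 *		lpfc_sli4_free_xri(phba, xri);	clears the bit, drops xri_used
 */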
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will return NO_XRI (0xffff); zero is not a valid xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI.last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}
/**
 * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
static int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
			struct list_head *post_sgl_list,
			int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
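
/*
 * Sizing note (illustrative): the non-embedded request must fit in one
 * SLI4 page.  Assuming a 16-byte struct sgl_page_pairs for the arithmetic,
 * roughly (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) - 4) / 16
 * page pairs can be posted per mailbox command; larger batches must be
 * split by the caller.
 */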
/**
 * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @nblist: pointer to nvme buffer list.
 * @count: number of scsi buffers on the list.
 *
 * This routine is invoked to post a block of @count scsi sgl pages from a
 * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
 **/
static int
lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
			    int count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"6118 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6119 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
				    reqlen, LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6120 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(lpfc_ncmd, nblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
					 SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable) {
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	} else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6125 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
/**
 * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
 * @phba: pointer to lpfc hba data structure.
 * @post_nblist: pointer to the nvme buffer list.
 * @sb_count: number of nvme buffers.
 *
 * This routine walks a list of nvme buffers that was passed in. It attempts
 * to construct blocks of nvme buffer sgls which contain contiguous xris and
 * uses the non-embedded SGL block post mailbox commands to post to the port.
 * For a single NVME buffer sgl with a non-contiguous xri, if any, it shall
 * use the embedded SGL post mailbox command for posting. The @post_nblist
 * passed in must be a local list, thus no lock is needed when manipulating
 * the list.
 *
 * Returns: 0 = failure, non-zero number of successfully posted buffers.
 **/
int
lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
			   struct list_head *post_nblist, int sb_count)
{
	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
	int status, sgl_size;
	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
	dma_addr_t pdma_phys_sgl1;
	int last_xritag = NO_XRI;
	int cur_xritag;
	LIST_HEAD(prep_nblist);
	LIST_HEAD(blck_nblist);
	LIST_HEAD(nvme_nblist);

	/* sanity check */
	if (sb_count <= 0)
		return 0;

	sgl_size = phba->cfg_sg_dma_buf_size;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
		list_del_init(&lpfc_ncmd->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_nblist, &blck_nblist);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posting++;
		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

		/* end of repost sgl list condition for NVME buffers */
		if (num_posting == sb_count) {
			if (post_cnt == 0) {
				/* last sgl posting block */
				list_splice_init(&prep_nblist, &blck_nblist);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				/* last single sgl with non-contiguous xri */
				if (sgl_size > SGL_PAGE_SIZE)
					pdma_phys_sgl1 =
						lpfc_ncmd->dma_phys_sgl +
						SGL_PAGE_SIZE;
				else
					pdma_phys_sgl1 = 0;
				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
				status = lpfc_sli4_post_sgl(
						phba, lpfc_ncmd->dma_phys_sgl,
						pdma_phys_sgl1, cur_xritag);
				if (status) {
					/* Post error.  Buffer unavailable. */
					lpfc_ncmd->flags |=
						LPFC_SBUF_NOT_POSTED;
				} else {
					/* Post success. Buffer available. */
					lpfc_ncmd->flags &=
						~LPFC_SBUF_NOT_POSTED;
					lpfc_ncmd->status = IOSTAT_SUCCESS;
					num_posted++;
				}
				/* success, put on NVME buffer sgl list */
				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post block of NVME buffer list sgls */
		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
						     post_cnt);

		/* don't reset xirtag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset NVME buffer post count for next round of posting */
		post_cnt = 0;

		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
		while (!list_empty(&blck_nblist)) {
			list_remove_head(&blck_nblist, lpfc_ncmd,
					 struct lpfc_io_buf, list);
			if (status) {
				/* Post error.  Mark buffer unavailable. */
				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
			} else {
				/* Post success, Mark buffer available. */
				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
				lpfc_ncmd->status = IOSTAT_SUCCESS;
				num_posted++;
			}
			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
		}
	}
	/* Push NVME buffers with sgl posted to the available list */
	lpfc_io_buf_replenish(phba, &nvme_nblist);

	return num_posted;
}
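
/*
 * Worked example of the block-forming logic above (illustrative): if the
 * buffers on @post_nblist carry xritags 100, 101, 102, 103 and then 200,
 * the first four are spliced onto blck_nblist and posted with one
 * non-embedded SGL_PAGES mailbox command; the buffer with xritag 200
 * starts a new block and, being the last single buffer with a
 * non-contiguous xri, is posted on its own through lpfc_sli4_post_sgl().
 */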
/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	/*  make rctl_names static to save stack space */
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

#define FC_RCTL_MDS_DIAGS	0xF4

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:		/* extended link services request */
	case FC_RCTL_ELS_REP:		/* extended link services reply */
	case FC_RCTL_ELS4_REQ:		/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:		/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:		/* basic link service NOP */
	case FC_RCTL_BA_ABTS:		/* basic link service abort */
	case FC_RCTL_BA_RMC:		/* remove connection */
	case FC_RCTL_BA_ACC:		/* basic accept */
	case FC_RCTL_BA_RJT:		/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:		/* acknowledge_1 */
	case FC_RCTL_ACK_0:		/* acknowledge_0 */
	case FC_RCTL_P_RJT:		/* port reject */
	case FC_RCTL_F_RJT:		/* fabric reject */
	case FC_RCTL_P_BSY:		/* port busy */
	case FC_RCTL_F_BSY:		/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:		/* fabric busy to link control frame */
	case FC_RCTL_LCR:		/* link credit reset */
	case FC_RCTL_MDS_DIAGS:		/* MDS Diagnostics */
	case FC_RCTL_END:		/* end */
		break;
	case FC_RCTL_VFTH:		/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}

	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
	case FC_TYPE_NVME:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:x%x, type:x%x, "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:x%x type:x%x\n",
			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
	return 1;
}
/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if one exists
 * or 0 if no VSAN Header exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}
/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 * @did: Destination ID to match against
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match frame to a
 * vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi, uint32_t did)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;

	if (did == Fabric_DID)
		return phba->pport;
	if ((phba->pport->fc_flag & FC_PT2PT) &&
	    !(phba->link_state == LPFC_HBA_READY))
		return phba->pport;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}
/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}
/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}
/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
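
/*
 * Timeout arithmetic (illustrative): with fc_edtov of 2000 ms, a sequence
 * whose newest frame carries a time_stamp more than msecs_to_jiffies(2000)
 * in the past is considered timed out and its frames are freed; the scan
 * stops at the first sequence still within E_D_TOV because the rcv list is
 * kept ordered oldest-first.
 */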
/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t	found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it shall free up
 * all the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if there is a matching partially assembled sequence present and
 *          all the frames are freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}
/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the assembled sequence at the upper level
 * protocol, described by the information from the basic abort @dmabuf. It
 * checks whether such a pending context exists at the upper level protocol.
 * If so, it shall clean up the pending context.
 *
 * Return
 * true  -- if there is a matching pending context of the sequence cleaned
 *          at ulp;
 * false -- if there is no matching pending context of the sequence present
 *          at ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Register all caring upper level protocols to attend abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}
/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	struct lpfc_nodelist *ndlp;

	if (cmd_iocbq) {
		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
		lpfc_nlp_put(ndlp);
		lpfc_nlp_not_used(ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node*/
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
			rsp_iocbq->iocb.ulpStatus,
			rsp_iocbq->iocb.un.ulpWord[4]);
}
/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
 **/
static uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}
/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: pointer to a virtual port.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: was the partially assembled receive sequence successfully aborted
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		ndlp = lpfc_nlp_init(vport, sid);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		/* Put ndlp onto pport node list */
		lpfc_enqueue_node(vport, ndlp);
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	ctiocb->context1 = lpfc_nlp_get(ndlp);
	if (!ctiocb->context1) {
		lpfc_sli_release_iocbq(phba, ctiocb);
		return;
	}

	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort so we
		 * own the oxid.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri != NO_XRI)
		lpfc_set_rrq_active(phba, ndlp, lxri,
			(xri == oxid) ? rxid : oxid, 0);
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
	if (aborted == false) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
	}
	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->context1 = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}
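
/*
 * Response selection summary (derived from the checks above): the BLS
 * response defaults to BA_ACC and is overridden to BA_RJT with reason
 * FC_BA_RJT_UNABLE / explanation FC_BA_RJT_INV_XID when either the
 * partially assembled sequence could not be aborted (@aborted == false)
 * or, for an ABTS from the exchange responder, the logical xri no longer
 * falls within the driver's IOCB range.
 */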
/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks that the unsolicited
 * sequence has been aborted in the per-oxid status. After that, it will issue
 * a basic accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	if (phba->nvmet_support) {
		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
		return;
	}

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things. 1) That the first
 * frame has a sequence count of zero. 2) There is a frame with the last frame
 * of sequence bit set. 3) That there are no holes in the sequence count. The
 * function will return 1 when the sequence is complete, otherwise it will
 * return 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
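
/*
 * F_CTL handling note (illustrative): the three fh_f_ctl bytes are packed
 * big-endian into one 24-bit value, so FC_FC_END_SEQ from fc/fc_fs.h can
 * be tested directly; a frame whose F_CTL has the End_Sequence bit set
 * terminates the completeness walk with success.
 */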
/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that were not
 * able to be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->vport = vport;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption.  Physical vpi. */
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->phba->vpi_ids[vport->vpi];
		/* put the first buffer into the first IOCBq */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;

		first_iocbq->iocb.un.rcvels.remoteID = sid;

		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			pbde = (struct ulp_bde64 *)
					&iocbq->iocb.unsli3.sli3Words[4];
			if (len > LPFC_DATA_BUF_SIZE)
				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			else
				pbde->tus.f.bdeSize = len;

			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
			tot_len += len;
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
							IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
							IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
					LPFC_DATA_BUF_SIZE;
			else
				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;

			tot_len += len;
			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;

			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	/* Free the sequence's header buffer */
	if (!first_iocbq)
		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);

	return first_iocbq;
}
18430 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport
*vport
,
18431 struct hbq_dmabuf
*seq_dmabuf
)
18433 struct fc_frame_header
*fc_hdr
;
18434 struct lpfc_iocbq
*iocbq
, *curr_iocb
, *next_iocb
;
18435 struct lpfc_hba
*phba
= vport
->phba
;
18437 fc_hdr
= (struct fc_frame_header
*)seq_dmabuf
->hbuf
.virt
;
18438 iocbq
= lpfc_prep_seq(vport
, seq_dmabuf
);
18440 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
18441 "2707 Ring %d handler: Failed to allocate "
18442 "iocb Rctl x%x Type x%x received\n",
18444 fc_hdr
->fh_r_ctl
, fc_hdr
->fh_type
);
18447 if (!lpfc_complete_unsol_iocb(phba
,
18448 phba
->sli4_hba
.els_wq
->pring
,
18449 iocbq
, fc_hdr
->fh_r_ctl
,
18451 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
18452 "2540 Ring %d handler: unexpected Rctl "
18453 "x%x Type x%x received\n",
18455 fc_hdr
->fh_r_ctl
, fc_hdr
->fh_type
);
18457 /* Free iocb created in lpfc_prep_seq */
18458 list_for_each_entry_safe(curr_iocb
, next_iocb
,
18459 &iocbq
->list
, list
) {
18460 list_del_init(&curr_iocb
->list
);
18461 lpfc_sli_release_iocbq(phba
, curr_iocb
);
18463 lpfc_sli_release_iocbq(phba
, iocbq
);
static void
lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			    struct lpfc_iocbq *rspiocb)
{
	struct lpfc_dmabuf *pcmd = cmdiocb->context2;

	/* Free the payload buffer, then the iocb, and kick any deferred ELS */
	if (pcmd && pcmd->virt)
		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	lpfc_drain_txq(phba);
}
18480 lpfc_sli4_handle_mds_loopback(struct lpfc_vport
*vport
,
18481 struct hbq_dmabuf
*dmabuf
)
18483 struct fc_frame_header
*fc_hdr
;
18484 struct lpfc_hba
*phba
= vport
->phba
;
18485 struct lpfc_iocbq
*iocbq
= NULL
;
18486 union lpfc_wqe
*wqe
;
18487 struct lpfc_dmabuf
*pcmd
= NULL
;
18488 uint32_t frame_len
;
18490 unsigned long iflags
;
18492 fc_hdr
= (struct fc_frame_header
*)dmabuf
->hbuf
.virt
;
18493 frame_len
= bf_get(lpfc_rcqe_length
, &dmabuf
->cq_event
.cqe
.rcqe_cmpl
);
18495 /* Send the received frame back */
18496 iocbq
= lpfc_sli_get_iocbq(phba
);
18498 /* Queue cq event and wakeup worker thread to process it */
18499 spin_lock_irqsave(&phba
->hbalock
, iflags
);
18500 list_add_tail(&dmabuf
->cq_event
.list
,
18501 &phba
->sli4_hba
.sp_queue_event
);
18502 phba
->hba_flag
|= HBA_SP_QUEUE_EVT
;
18503 spin_unlock_irqrestore(&phba
->hbalock
, iflags
);
18504 lpfc_worker_wake_up(phba
);
18508 /* Allocate buffer for command payload */
18509 pcmd
= kmalloc(sizeof(struct lpfc_dmabuf
), GFP_KERNEL
);
18511 pcmd
->virt
= dma_pool_alloc(phba
->lpfc_drb_pool
, GFP_KERNEL
,
18513 if (!pcmd
|| !pcmd
->virt
)
18516 INIT_LIST_HEAD(&pcmd
->list
);
18518 /* copyin the payload */
18519 memcpy(pcmd
->virt
, dmabuf
->dbuf
.virt
, frame_len
);
18521 /* fill in BDE's for command */
18522 iocbq
->iocb
.un
.xseq64
.bdl
.addrHigh
= putPaddrHigh(pcmd
->phys
);
18523 iocbq
->iocb
.un
.xseq64
.bdl
.addrLow
= putPaddrLow(pcmd
->phys
);
18524 iocbq
->iocb
.un
.xseq64
.bdl
.bdeFlags
= BUFF_TYPE_BDE_64
;
18525 iocbq
->iocb
.un
.xseq64
.bdl
.bdeSize
= frame_len
;
18527 iocbq
->context2
= pcmd
;
18528 iocbq
->vport
= vport
;
18529 iocbq
->iocb_flag
&= ~LPFC_FIP_ELS_ID_MASK
;
18530 iocbq
->iocb_flag
|= LPFC_USE_FCPWQIDX
;
18533 * Setup rest of the iocb as though it were a WQE
18534 * Build the SEND_FRAME WQE
18536 wqe
= (union lpfc_wqe
*)&iocbq
->iocb
;
18538 wqe
->send_frame
.frame_len
= frame_len
;
18539 wqe
->send_frame
.fc_hdr_wd0
= be32_to_cpu(*((uint32_t *)fc_hdr
));
18540 wqe
->send_frame
.fc_hdr_wd1
= be32_to_cpu(*((uint32_t *)fc_hdr
+ 1));
18541 wqe
->send_frame
.fc_hdr_wd2
= be32_to_cpu(*((uint32_t *)fc_hdr
+ 2));
18542 wqe
->send_frame
.fc_hdr_wd3
= be32_to_cpu(*((uint32_t *)fc_hdr
+ 3));
18543 wqe
->send_frame
.fc_hdr_wd4
= be32_to_cpu(*((uint32_t *)fc_hdr
+ 4));
18544 wqe
->send_frame
.fc_hdr_wd5
= be32_to_cpu(*((uint32_t *)fc_hdr
+ 5));
18546 iocbq
->iocb
.ulpCommand
= CMD_SEND_FRAME
;
18547 iocbq
->iocb
.ulpLe
= 1;
18548 iocbq
->iocb_cmpl
= lpfc_sli4_mds_loopback_cmpl
;
18549 rc
= lpfc_sli_issue_iocb(phba
, LPFC_ELS_RING
, iocbq
, 0);
18550 if (rc
== IOCB_ERROR
)
18553 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18557 lpfc_printf_log(phba
, KERN_WARNING
, LOG_SLI
,
18558 "2023 Unable to process MDS loopback frame\n");
18559 if (pcmd
&& pcmd
->virt
)
18560 dma_pool_free(phba
->lpfc_drb_pool
, pcmd
->virt
, pcmd
->phys
);
18563 lpfc_sli_release_iocbq(phba
, iocbq
);
18564 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
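/*
 * Illustrative sketch only (not part of the driver): the SEND_FRAME WQE used
 * above carries the 24-byte FC frame header as six big-endian words. This
 * hypothetical helper shows the same packing done by the fc_hdr_wd0..wd5
 * assignments in lpfc_sli4_handle_mds_loopback().
 */
static inline void lpfc_example_pack_fc_hdr(union lpfc_wqe *wqe,
					    struct fc_frame_header *fc_hdr)
{
	uint32_t *w = (uint32_t *)fc_hdr;

	wqe->send_frame.fc_hdr_wd0 = be32_to_cpu(w[0]);
	wqe->send_frame.fc_hdr_wd1 = be32_to_cpu(w[1]);
	wqe->send_frame.fc_hdr_wd2 = be32_to_cpu(w[2]);
	wqe->send_frame.fc_hdr_wd3 = be32_to_cpu(w[3]);
	wqe->send_frame.fc_hdr_wd4 = be32_to_cpu(w[4]);
	wqe->send_frame.fc_hdr_wd5 = be32_to_cpu(w[5]);
}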
/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
 *
 * This function is called with no lock held. It processes all the received
 * buffers and hands them to the upper layers when a received buffer indicates
 * that it is the final frame in the sequence. The interrupt service routine
 * processes received buffers at interrupt context; the worker thread then
 * calls lpfc_sli4_handle_received_buffer, which calls the appropriate receive
 * function when the final frame in a sequence is received.
 **/
18580 lpfc_sli4_handle_received_buffer(struct lpfc_hba
*phba
,
18581 struct hbq_dmabuf
*dmabuf
)
18583 struct hbq_dmabuf
*seq_dmabuf
;
18584 struct fc_frame_header
*fc_hdr
;
18585 struct lpfc_vport
*vport
;
18589 /* Process each received buffer */
18590 fc_hdr
= (struct fc_frame_header
*)dmabuf
->hbuf
.virt
;
18592 if (fc_hdr
->fh_r_ctl
== FC_RCTL_MDS_DIAGS
||
18593 fc_hdr
->fh_r_ctl
== FC_RCTL_DD_UNSOL_DATA
) {
18594 vport
= phba
->pport
;
18595 /* Handle MDS Loopback frames */
18596 if (!(phba
->pport
->load_flag
& FC_UNLOADING
))
18597 lpfc_sli4_handle_mds_loopback(vport
, dmabuf
);
18599 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18603 /* check to see if this a valid type of frame */
18604 if (lpfc_fc_frame_check(phba
, fc_hdr
)) {
18605 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18609 if ((bf_get(lpfc_cqe_code
,
18610 &dmabuf
->cq_event
.cqe
.rcqe_cmpl
) == CQE_CODE_RECEIVE_V1
))
18611 fcfi
= bf_get(lpfc_rcqe_fcf_id_v1
,
18612 &dmabuf
->cq_event
.cqe
.rcqe_cmpl
);
18614 fcfi
= bf_get(lpfc_rcqe_fcf_id
,
18615 &dmabuf
->cq_event
.cqe
.rcqe_cmpl
);
18617 if (fc_hdr
->fh_r_ctl
== 0xF4 && fc_hdr
->fh_type
== 0xFF) {
18618 vport
= phba
->pport
;
18619 lpfc_printf_log(phba
, KERN_INFO
, LOG_SLI
,
18620 "2023 MDS Loopback %d bytes\n",
18621 bf_get(lpfc_rcqe_length
,
18622 &dmabuf
->cq_event
.cqe
.rcqe_cmpl
));
18623 /* Handle MDS Loopback frames */
18624 lpfc_sli4_handle_mds_loopback(vport
, dmabuf
);
18628 /* d_id this frame is directed to */
18629 did
= sli4_did_from_fc_hdr(fc_hdr
);
18631 vport
= lpfc_fc_frame_to_vport(phba
, fc_hdr
, fcfi
, did
);
18633 /* throw out the frame */
18634 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18638 /* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
18639 if (!(vport
->vpi_state
& LPFC_VPI_REGISTERED
) &&
18640 (did
!= Fabric_DID
)) {
18642 * Throw out the frame if we are not pt2pt.
18643 * The pt2pt protocol allows for discovery frames
18644 * to be received without a registered VPI.
18646 if (!(vport
->fc_flag
& FC_PT2PT
) ||
18647 (phba
->link_state
== LPFC_HBA_READY
)) {
18648 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18653 /* Handle the basic abort sequence (BA_ABTS) event */
18654 if (fc_hdr
->fh_r_ctl
== FC_RCTL_BA_ABTS
) {
18655 lpfc_sli4_handle_unsol_abort(vport
, dmabuf
);
18659 /* Link this frame */
18660 seq_dmabuf
= lpfc_fc_frame_add(vport
, dmabuf
);
18662 /* unable to add frame to vport - throw it out */
18663 lpfc_in_buf_free(phba
, &dmabuf
->dbuf
);
18666 /* If not last frame in sequence continue processing frames. */
18667 if (!lpfc_seq_complete(seq_dmabuf
))
18670 /* Send the complete sequence to the upper layer protocol */
18671 lpfc_sli4_send_seq_to_ulp(vport
, seq_dmabuf
);
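/*
 * Illustrative sketch only (not part of the driver): the usual disposition of
 * a received hbq_dmabuf, mirroring the tail of
 * lpfc_sli4_handle_received_buffer() above. The helper name is hypothetical
 * and the r_ctl/vport checks are elided.
 */
static inline void lpfc_example_rcv_frame(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;

	/* Link the frame into the partial sequence it belongs to */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
		return;
	}
	/* Hand the sequence to the ULP only once the last frame has arrived */
	if (lpfc_seq_complete(seq_dmabuf))
		lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}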
/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory regions to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. Each posting covers a
 * SLI4_PAGE_SIZE memory region that holds up to SLI4_PAGE_SIZE / 64 rpi
 * context headers.
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when the driver is active.
 *
 * Returns
 *	0 - successful
 *	-EIO - The mailbox failed to complete successfully.
 *	When this error occurs, the driver is not guaranteed
 *	to have any rpi regions posted to the device and
 *	must either attempt to repost the regions or take a
 *	fatal error.
 **/
18696 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba
*phba
)
18698 struct lpfc_rpi_hdr
*rpi_page
;
18702 /* SLI4 ports that support extents do not require RPI headers. */
18703 if (!phba
->sli4_hba
.rpi_hdrs_in_use
)
18705 if (phba
->sli4_hba
.extents_in_use
)
18708 list_for_each_entry(rpi_page
, &phba
->sli4_hba
.lpfc_rpi_hdr_list
, list
) {
18710 * Assign the rpi headers a physical rpi only if the driver
18711 * has not initialized those resources. A port reset only
18712 * needs the headers posted.
18714 if (bf_get(lpfc_rpi_rsrc_rdy
, &phba
->sli4_hba
.sli4_flags
) !=
18716 rpi_page
->start_rpi
= phba
->sli4_hba
.rpi_ids
[lrpi
];
18718 rc
= lpfc_sli4_post_rpi_hdr(phba
, rpi_page
);
18719 if (rc
!= MBX_SUCCESS
) {
18720 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
18721 "2008 Error %d posting all rpi "
18729 bf_set(lpfc_rpi_rsrc_rdy
, &phba
->sli4_hba
.sli4_flags
,
18730 LPFC_RPI_RSRC_RDY
);
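/*
 * Illustrative sketch only (not part of the driver): a typical caller simply
 * reposts all rpi header regions during init or reset recovery and treats a
 * failure as "no regions are guaranteed posted". The helper name is
 * hypothetical.
 */
static inline int lpfc_example_repost_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = lpfc_sli4_post_all_rpi_hdrs(phba);

	if (rc)
		/* retry the posting or escalate to a port reset */
		return rc;
	return 0;
}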
18735 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
18736 * @phba: pointer to lpfc hba data structure.
18737 * @rpi_page: pointer to the rpi memory region.
18739 * This routine is invoked to post a single rpi header to the
18740 * HBA consistent with the SLI-4 interface spec. This memory region
18741 * maps up to 64 rpi context regions.
18745 * -ENOMEM - No available memory
18746 * -EIO - The mailbox failed to complete successfully.
18749 lpfc_sli4_post_rpi_hdr(struct lpfc_hba
*phba
, struct lpfc_rpi_hdr
*rpi_page
)
18751 LPFC_MBOXQ_t
*mboxq
;
18752 struct lpfc_mbx_post_hdr_tmpl
*hdr_tmpl
;
18754 uint32_t shdr_status
, shdr_add_status
;
18755 union lpfc_sli4_cfg_shdr
*shdr
;
18757 /* SLI4 ports that support extents do not require RPI headers. */
18758 if (!phba
->sli4_hba
.rpi_hdrs_in_use
)
18760 if (phba
->sli4_hba
.extents_in_use
)
18763 /* The port is notified of the header region via a mailbox command. */
18764 mboxq
= (LPFC_MBOXQ_t
*) mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
18766 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
18767 "2001 Unable to allocate memory for issuing "
18768 "SLI_CONFIG_SPECIAL mailbox command\n");
18772 /* Post all rpi memory regions to the port. */
18773 hdr_tmpl
= &mboxq
->u
.mqe
.un
.hdr_tmpl
;
18774 lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_FCOE
,
18775 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE
,
18776 sizeof(struct lpfc_mbx_post_hdr_tmpl
) -
18777 sizeof(struct lpfc_sli4_cfg_mhdr
),
18778 LPFC_SLI4_MBX_EMBED
);
18781 /* Post the physical rpi to the port for this rpi header. */
18782 bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset
, hdr_tmpl
,
18783 rpi_page
->start_rpi
);
18784 bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt
,
18785 hdr_tmpl
, rpi_page
->page_count
);
18787 hdr_tmpl
->rpi_paddr_lo
= putPaddrLow(rpi_page
->dmabuf
->phys
);
18788 hdr_tmpl
->rpi_paddr_hi
= putPaddrHigh(rpi_page
->dmabuf
->phys
);
18789 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
18790 shdr
= (union lpfc_sli4_cfg_shdr
*) &hdr_tmpl
->header
.cfg_shdr
;
18791 shdr_status
= bf_get(lpfc_mbox_hdr_status
, &shdr
->response
);
18792 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
, &shdr
->response
);
18793 if (rc
!= MBX_TIMEOUT
)
18794 mempool_free(mboxq
, phba
->mbox_mem_pool
);
18795 if (shdr_status
|| shdr_add_status
|| rc
) {
18796 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
18797 "2514 POST_RPI_HDR mailbox failed with "
18798 "status x%x add_status x%x, mbx status x%x\n",
18799 shdr_status
, shdr_add_status
, rc
);
18803 * The next_rpi stores the next logical module-64 rpi value used
18804 * to post physical rpis in subsequent rpi postings.
18806 spin_lock_irq(&phba
->hbalock
);
18807 phba
->sli4_hba
.next_rpi
= rpi_page
->next_rpi
;
18808 spin_unlock_irq(&phba
->hbalock
);
/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available rpi from the
 * driver's rpi bitmask. When the pool of unposted rpis runs low, another
 * rpi header region is allocated and posted to the port.
 *
 * Returns
 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
18827 lpfc_sli4_alloc_rpi(struct lpfc_hba
*phba
)
18830 uint16_t max_rpi
, rpi_limit
;
18831 uint16_t rpi_remaining
, lrpi
= 0;
18832 struct lpfc_rpi_hdr
*rpi_hdr
;
18833 unsigned long iflag
;
18836 * Fetch the next logical rpi. Because this index is logical,
18837 * the driver starts at 0 each time.
18839 spin_lock_irqsave(&phba
->hbalock
, iflag
);
18840 max_rpi
= phba
->sli4_hba
.max_cfg_param
.max_rpi
;
18841 rpi_limit
= phba
->sli4_hba
.next_rpi
;
18843 rpi
= find_next_zero_bit(phba
->sli4_hba
.rpi_bmask
, rpi_limit
, 0);
18844 if (rpi
>= rpi_limit
)
18845 rpi
= LPFC_RPI_ALLOC_ERROR
;
18847 set_bit(rpi
, phba
->sli4_hba
.rpi_bmask
);
18848 phba
->sli4_hba
.max_cfg_param
.rpi_used
++;
18849 phba
->sli4_hba
.rpi_count
++;
18851 lpfc_printf_log(phba
, KERN_INFO
,
18852 LOG_NODE
| LOG_DISCOVERY
,
18853 "0001 Allocated rpi:x%x max:x%x lim:x%x\n",
18854 (int) rpi
, max_rpi
, rpi_limit
);
18857 * Don't try to allocate more rpi header regions if the device limit
18858 * has been exhausted.
18860 if ((rpi
== LPFC_RPI_ALLOC_ERROR
) &&
18861 (phba
->sli4_hba
.rpi_count
>= max_rpi
)) {
18862 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
18867 * RPI header postings are not required for SLI4 ports capable of
18870 if (!phba
->sli4_hba
.rpi_hdrs_in_use
) {
18871 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
18876 * If the driver is running low on rpi resources, allocate another
18877 * page now. Note that the next_rpi value is used because
18878 * it represents how many are actually in use whereas max_rpi notes
18879 * how many are supported max by the device.
18881 rpi_remaining
= phba
->sli4_hba
.next_rpi
- phba
->sli4_hba
.rpi_count
;
18882 spin_unlock_irqrestore(&phba
->hbalock
, iflag
);
18883 if (rpi_remaining
< LPFC_RPI_LOW_WATER_MARK
) {
18884 rpi_hdr
= lpfc_sli4_create_rpi_hdr(phba
);
18886 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
18887 "2002 Error Could not grow rpi "
18890 lrpi
= rpi_hdr
->start_rpi
;
18891 rpi_hdr
->start_rpi
= phba
->sli4_hba
.rpi_ids
[lrpi
];
18892 lpfc_sli4_post_rpi_hdr(phba
, rpi_hdr
);
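/*
 * Illustrative sketch only (not part of the driver): the expected
 * allocate/check/free pattern for rpis. The helper name is hypothetical.
 */
static inline int lpfc_example_rpi_roundtrip(struct lpfc_hba *phba)
{
	int rpi = lpfc_sli4_alloc_rpi(phba);

	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return -ENOSPC;	/* device rpi range exhausted */
	/* ... the rpi would normally back a REG_LOGIN for a remote port ... */
	lpfc_sli4_free_rpi(phba, rpi);
	return 0;
}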
/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	/*
	 * If the rpi value indicates a prior unreg has already
	 * been done, skip the unreg.
	 */
	if (rpi == LPFC_RPI_ALLOC_ERROR)
		return;

	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	} else {
		lpfc_printf_log(phba, KERN_INFO,
				LOG_NODE | LOG_DISCOVERY,
				"2016 rpi %x not inuse\n",
				rpi);
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: rpi to free
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
/**
 * lpfc_sli4_resume_rpi - Resume an rpi with the port
 * @ndlp: pointer to lpfc nodelist data structure.
 * @cmpl: completion call-back.
 * @arg: data to load as mbox 'caller buffer information'
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command for the
 * rpi associated with @ndlp. If @cmpl is supplied it is used as the mailbox
 * completion, with @arg as the caller's context; otherwise the default
 * mailbox completion handler is used.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
		     void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the rpi resume via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Construct the RESUME_RPI mailbox command. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->ctx_buf = arg;
		mboxq->ctx_ndlp = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
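/*
 * Illustrative sketch only (not part of the driver): resuming an rpi without
 * a private completion; a NULL @cmpl falls back to the default mailbox
 * completion handler, as shown above. The helper name is hypothetical.
 */
static inline int lpfc_example_resume_rpi(struct lpfc_nodelist *ndlp)
{
	return lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
}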
/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *    0 success
 *    -Evalue otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}
/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
 * command issued when manually adding a single FCF record. It checks the
 * subheader status and frees the nonembedded mailbox resources.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
	    (shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2558 ADD_FCF_RECORD mailbox failed with "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
19072 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19073 * @phba: pointer to lpfc hba data structure.
19074 * @fcf_record: pointer to the initialized fcf record to add.
19076 * This routine is invoked to manually add a single FCF record. The caller
19077 * must pass a completely initialized FCF_Record. This routine takes
19078 * care of the nonembedded mailbox operations.
19081 lpfc_sli4_add_fcf_record(struct lpfc_hba
*phba
, struct fcf_record
*fcf_record
)
19084 LPFC_MBOXQ_t
*mboxq
;
19087 struct lpfc_mbx_sge sge
;
19088 uint32_t alloc_len
, req_len
;
19091 mboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
19093 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
19094 "2009 Failed to allocate mbox for ADD_FCF cmd\n");
19098 req_len
= sizeof(struct fcf_record
) + sizeof(union lpfc_sli4_cfg_shdr
) +
19101 /* Allocate DMA memory and set up the non-embedded mailbox command */
19102 alloc_len
= lpfc_sli4_config(phba
, mboxq
, LPFC_MBOX_SUBSYSTEM_FCOE
,
19103 LPFC_MBOX_OPCODE_FCOE_ADD_FCF
,
19104 req_len
, LPFC_SLI4_MBX_NEMBED
);
19105 if (alloc_len
< req_len
) {
19106 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
19107 "2523 Allocated DMA memory size (x%x) is "
19108 "less than the requested DMA memory "
19109 "size (x%x)\n", alloc_len
, req_len
);
19110 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
19115 * Get the first SGE entry from the non-embedded DMA memory. This
19116 * routine only uses a single SGE.
19118 lpfc_sli4_mbx_sge_get(mboxq
, 0, &sge
);
19119 virt_addr
= mboxq
->sge_array
->addr
[0];
19121 * Configure the FCF record for FCFI 0. This is the driver's
19122 * hardcoded default and gets used in nonFIP mode.
19124 fcfindex
= bf_get(lpfc_fcf_record_fcf_index
, fcf_record
);
19125 bytep
= virt_addr
+ sizeof(union lpfc_sli4_cfg_shdr
);
19126 lpfc_sli_pcimem_bcopy(&fcfindex
, bytep
, sizeof(uint32_t));
19129 * Copy the fcf_index and the FCF Record Data. The data starts after
19130 * the FCoE header plus word10. The data copy needs to be endian
19133 bytep
+= sizeof(uint32_t);
19134 lpfc_sli_pcimem_bcopy(fcf_record
, bytep
, sizeof(struct fcf_record
));
19135 mboxq
->vport
= phba
->pport
;
19136 mboxq
->mbox_cmpl
= lpfc_mbx_cmpl_add_fcf_record
;
19137 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_NOWAIT
);
19138 if (rc
== MBX_NOT_FINISHED
) {
19139 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
19140 "2515 ADD_FCF_RECORD mailbox failed with "
19141 "status 0x%x\n", rc
);
19142 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
19151 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
19152 * @phba: pointer to lpfc hba data structure.
19153 * @fcf_record: pointer to the fcf record to write the default data.
19154 * @fcf_index: FCF table entry index.
19156 * This routine is invoked to build the driver's default FCF record. The
19157 * values used are hardcoded. This routine handles memory initialization.
19161 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba
*phba
,
19162 struct fcf_record
*fcf_record
,
19163 uint16_t fcf_index
)
19165 memset(fcf_record
, 0, sizeof(struct fcf_record
));
19166 fcf_record
->max_rcv_size
= LPFC_FCOE_MAX_RCV_SIZE
;
19167 fcf_record
->fka_adv_period
= LPFC_FCOE_FKA_ADV_PER
;
19168 fcf_record
->fip_priority
= LPFC_FCOE_FIP_PRIORITY
;
19169 bf_set(lpfc_fcf_record_mac_0
, fcf_record
, phba
->fc_map
[0]);
19170 bf_set(lpfc_fcf_record_mac_1
, fcf_record
, phba
->fc_map
[1]);
19171 bf_set(lpfc_fcf_record_mac_2
, fcf_record
, phba
->fc_map
[2]);
19172 bf_set(lpfc_fcf_record_mac_3
, fcf_record
, LPFC_FCOE_FCF_MAC3
);
19173 bf_set(lpfc_fcf_record_mac_4
, fcf_record
, LPFC_FCOE_FCF_MAC4
);
19174 bf_set(lpfc_fcf_record_mac_5
, fcf_record
, LPFC_FCOE_FCF_MAC5
);
19175 bf_set(lpfc_fcf_record_fc_map_0
, fcf_record
, phba
->fc_map
[0]);
19176 bf_set(lpfc_fcf_record_fc_map_1
, fcf_record
, phba
->fc_map
[1]);
19177 bf_set(lpfc_fcf_record_fc_map_2
, fcf_record
, phba
->fc_map
[2]);
19178 bf_set(lpfc_fcf_record_fcf_valid
, fcf_record
, 1);
19179 bf_set(lpfc_fcf_record_fcf_avail
, fcf_record
, 1);
19180 bf_set(lpfc_fcf_record_fcf_index
, fcf_record
, fcf_index
);
19181 bf_set(lpfc_fcf_record_mac_addr_prov
, fcf_record
,
19182 LPFC_FCF_FPMA
| LPFC_FCF_SPMA
);
19183 /* Set the VLAN bit map */
19184 if (phba
->valid_vlan
) {
19185 fcf_record
->vlan_bitmap
[phba
->vlan_id
/ 8]
19186 = 1 << (phba
->vlan_id
% 8);
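/*
 * Illustrative sketch only (not part of the driver): building the driver's
 * default FCF record and handing it to lpfc_sli4_add_fcf_record(), as a
 * non-FIP bring-up path would. Index 0 is the driver's hardcoded default;
 * the helper name is hypothetical.
 */
static inline int lpfc_example_add_dflt_fcf(struct lpfc_hba *phba)
{
	struct fcf_record *fcf_record;
	int rc;

	fcf_record = kzalloc(sizeof(*fcf_record), GFP_KERNEL);
	if (!fcf_record)
		return -ENOMEM;
	lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 0);
	rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
	/* The record is copied into the mailbox SGE, so it can be freed now */
	kfree(fcf_record);
	return rc;
}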
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * records and processing them one at a time, starting from @fcf_index,
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
19203 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba
*phba
, uint16_t fcf_index
)
19206 LPFC_MBOXQ_t
*mboxq
;
19208 phba
->fcoe_eventtag_at_fcf_scan
= phba
->fcoe_eventtag
;
19209 phba
->fcoe_cvl_eventtag_attn
= phba
->fcoe_cvl_eventtag
;
19210 mboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
19212 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
19213 "2000 Failed to allocate mbox for "
19216 goto fail_fcf_scan
;
19218 /* Construct the read FCF record mailbox command */
19219 rc
= lpfc_sli4_mbx_read_fcf_rec(phba
, mboxq
, fcf_index
);
19222 goto fail_fcf_scan
;
19224 /* Issue the mailbox command asynchronously */
19225 mboxq
->vport
= phba
->pport
;
19226 mboxq
->mbox_cmpl
= lpfc_mbx_cmpl_fcf_scan_read_fcf_rec
;
19228 spin_lock_irq(&phba
->hbalock
);
19229 phba
->hba_flag
|= FCF_TS_INPROG
;
19230 spin_unlock_irq(&phba
->hbalock
);
19232 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_NOWAIT
);
19233 if (rc
== MBX_NOT_FINISHED
)
19236 /* Reset eligible FCF count for new scan */
19237 if (fcf_index
== LPFC_FCOE_FCF_GET_FIRST
)
19238 phba
->fcf
.eligible_fcf_cnt
= 0;
19244 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
19245 /* FCF scan failed, clear FCF_TS_INPROG flag */
19246 spin_lock_irq(&phba
->hbalock
);
19247 phba
->hba_flag
&= ~FCF_TS_INPROG
;
19248 spin_unlock_irq(&phba
->hbalock
);
/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
19265 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba
*phba
, uint16_t fcf_index
)
19268 LPFC_MBOXQ_t
*mboxq
;
19270 mboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
19272 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
| LOG_INIT
,
19273 "2763 Failed to allocate mbox for "
19276 goto fail_fcf_read
;
19278 /* Construct the read FCF record mailbox command */
19279 rc
= lpfc_sli4_mbx_read_fcf_rec(phba
, mboxq
, fcf_index
);
19282 goto fail_fcf_read
;
19284 /* Issue the mailbox command asynchronously */
19285 mboxq
->vport
= phba
->pport
;
19286 mboxq
->mbox_cmpl
= lpfc_mbx_cmpl_fcf_rr_read_fcf_rec
;
19287 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_NOWAIT
);
19288 if (rc
== MBX_NOT_FINISHED
)
19294 if (error
&& mboxq
)
19295 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it is eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
19311 lpfc_sli4_read_fcf_rec(struct lpfc_hba
*phba
, uint16_t fcf_index
)
19314 LPFC_MBOXQ_t
*mboxq
;
19316 mboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
19318 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
| LOG_INIT
,
19319 "2758 Failed to allocate mbox for "
19322 goto fail_fcf_read
;
19324 /* Construct the read FCF record mailbox command */
19325 rc
= lpfc_sli4_mbx_read_fcf_rec(phba
, mboxq
, fcf_index
);
19328 goto fail_fcf_read
;
19330 /* Issue the mailbox command asynchronously */
19331 mboxq
->vport
= phba
->pport
;
19332 mboxq
->mbox_cmpl
= lpfc_mbx_cmpl_read_fcf_rec
;
19333 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_NOWAIT
);
19334 if (rc
== MBX_NOT_FINISHED
)
19340 if (error
&& mboxq
)
19341 lpfc_sli4_mbox_cmd_free(phba
, mboxq
);
/**
 * lpfc_check_next_fcf_pri_level - Repopulate the rr_bmask at the next priority
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level, starting from the highest priority
 * to the lowest. The most likely FCF candidate will be in the highest
 * priority group. When this routine is called it searches the fcf_pri list
 * for the next lowest priority group and repopulates the rr_bmask with only
 * those entries.
 *
 * Returns:
 *	1=success 0=failure
 **/
19359 lpfc_check_next_fcf_pri_level(struct lpfc_hba
*phba
)
19361 uint16_t next_fcf_pri
;
19362 uint16_t last_index
;
19363 struct lpfc_fcf_pri
*fcf_pri
;
19367 last_index
= find_first_bit(phba
->fcf
.fcf_rr_bmask
,
19368 LPFC_SLI4_FCF_TBL_INDX_MAX
);
19369 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
19370 "3060 Last IDX %d\n", last_index
);
19372 /* Verify the priority list has 2 or more entries */
19373 spin_lock_irq(&phba
->hbalock
);
19374 if (list_empty(&phba
->fcf
.fcf_pri_list
) ||
19375 list_is_singular(&phba
->fcf
.fcf_pri_list
)) {
19376 spin_unlock_irq(&phba
->hbalock
);
19377 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
,
19378 "3061 Last IDX %d\n", last_index
);
19379 return 0; /* Empty rr list */
19381 spin_unlock_irq(&phba
->hbalock
);
19385 * Clear the rr_bmask and set all of the bits that are at this
19388 memset(phba
->fcf
.fcf_rr_bmask
, 0,
19389 sizeof(*phba
->fcf
.fcf_rr_bmask
));
19390 spin_lock_irq(&phba
->hbalock
);
19391 list_for_each_entry(fcf_pri
, &phba
->fcf
.fcf_pri_list
, list
) {
19392 if (fcf_pri
->fcf_rec
.flag
& LPFC_FCF_FLOGI_FAILED
)
19395 * the 1st priority that has not FLOGI failed
19396 * will be the highest.
19399 next_fcf_pri
= fcf_pri
->fcf_rec
.priority
;
19400 spin_unlock_irq(&phba
->hbalock
);
19401 if (fcf_pri
->fcf_rec
.priority
== next_fcf_pri
) {
19402 rc
= lpfc_sli4_fcf_rr_index_set(phba
,
19403 fcf_pri
->fcf_rec
.fcf_index
);
19407 spin_lock_irq(&phba
->hbalock
);
19410 * if next_fcf_pri was not set above and the list is not empty then
19411 * we have failed flogis on all of them. So reset flogi failed
19412 * and start at the beginning.
19414 if (!next_fcf_pri
&& !list_empty(&phba
->fcf
.fcf_pri_list
)) {
19415 list_for_each_entry(fcf_pri
, &phba
->fcf
.fcf_pri_list
, list
) {
19416 fcf_pri
->fcf_rec
.flag
&= ~LPFC_FCF_FLOGI_FAILED
;
19418 * the 1st priority that has not FLOGI failed
19419 * will be the highest.
19422 next_fcf_pri
= fcf_pri
->fcf_rec
.priority
;
19423 spin_unlock_irq(&phba
->hbalock
);
19424 if (fcf_pri
->fcf_rec
.priority
== next_fcf_pri
) {
19425 rc
= lpfc_sli4_fcf_rr_index_set(phba
,
19426 fcf_pri
->fcf_rec
.fcf_index
);
19430 spin_lock_irq(&phba
->hbalock
);
19434 spin_unlock_irq(&phba
->hbalock
);
19439 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
19440 * @phba: pointer to lpfc hba data structure.
19442 * This routine is to get the next eligible FCF record index in a round
19443 * robin fashion. If the next eligible FCF record index equals to the
19444 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
19445 * shall be returned, otherwise, the next eligible FCF record's index
19446 * shall be returned.
19449 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba
*phba
)
19451 uint16_t next_fcf_index
;
19454 /* Search start from next bit of currently registered FCF index */
19455 next_fcf_index
= phba
->fcf
.current_rec
.fcf_indx
;
19458 /* Determine the next fcf index to check */
19459 next_fcf_index
= (next_fcf_index
+ 1) % LPFC_SLI4_FCF_TBL_INDX_MAX
;
19460 next_fcf_index
= find_next_bit(phba
->fcf
.fcf_rr_bmask
,
19461 LPFC_SLI4_FCF_TBL_INDX_MAX
,
19464 /* Wrap around condition on phba->fcf.fcf_rr_bmask */
19465 if (next_fcf_index
>= LPFC_SLI4_FCF_TBL_INDX_MAX
) {
19467 * If we have wrapped then we need to clear the bits that
19468 * have been tested so that we can detect when we should
19469 * change the priority level.
19471 next_fcf_index
= find_next_bit(phba
->fcf
.fcf_rr_bmask
,
19472 LPFC_SLI4_FCF_TBL_INDX_MAX
, 0);
19476 /* Check roundrobin failover list empty condition */
19477 if (next_fcf_index
>= LPFC_SLI4_FCF_TBL_INDX_MAX
||
19478 next_fcf_index
== phba
->fcf
.current_rec
.fcf_indx
) {
19480 * If next fcf index is not found check if there are lower
19481 * Priority level fcf's in the fcf_priority list.
* Set up the rr_bmask with all of the available fcf bits
19483 * at that level and continue the selection process.
19485 if (lpfc_check_next_fcf_pri_level(phba
))
19486 goto initial_priority
;
19487 lpfc_printf_log(phba
, KERN_WARNING
, LOG_FIP
,
19488 "2844 No roundrobin failover FCF available\n");
19490 return LPFC_FCOE_FCF_NEXT_NONE
;
19493 if (next_fcf_index
< LPFC_SLI4_FCF_TBL_INDX_MAX
&&
19494 phba
->fcf
.fcf_pri
[next_fcf_index
].fcf_rec
.flag
&
19495 LPFC_FCF_FLOGI_FAILED
) {
19496 if (list_is_singular(&phba
->fcf
.fcf_pri_list
))
19497 return LPFC_FCOE_FCF_NEXT_NONE
;
19499 goto next_priority
;
19502 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
19503 "2845 Get next roundrobin failover FCF (x%x)\n",
19506 return next_fcf_index
;
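/*
 * Illustrative sketch only (not part of the driver): consuming the roundrobin
 * iterator above. LPFC_FCOE_FCF_NEXT_NONE means the failover list is
 * exhausted. The helper name is hypothetical.
 */
static inline bool lpfc_example_pick_next_fcf(struct lpfc_hba *phba,
					      uint16_t *fcf_index)
{
	uint16_t next = lpfc_sli4_fcf_rr_next_index_get(phba);

	if (next == LPFC_FCOE_FCF_NEXT_NONE)
		return false;
	*fcf_index = next;
	return true;
}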
/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to 'set'
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set, otherwise it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}
/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index into the FCF table to 'clear'
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;

	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}
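/*
 * Illustrative sketch only (not part of the driver): on a FLOGI failure the
 * failed index is typically cleared from the roundrobin bmask before the
 * next eligible index is fetched. The helper name is hypothetical.
 */
static inline uint16_t lpfc_example_fcf_failover(struct lpfc_hba *phba,
						 uint16_t failed_index)
{
	lpfc_sli4_fcf_rr_index_clear(phba, failed_index);
	return lpfc_sli4_fcf_rr_next_index_get(phba);
}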
19581 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
19582 * @phba: pointer to lpfc hba data structure.
19583 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
19585 * This routine is the completion routine for the rediscover FCF table mailbox
19586 * command. If the mailbox command returned failure, it will try to stop the
19587 * FCF rediscover wait timer.
19590 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mbox
)
19592 struct lpfc_mbx_redisc_fcf_tbl
*redisc_fcf
;
19593 uint32_t shdr_status
, shdr_add_status
;
19595 redisc_fcf
= &mbox
->u
.mqe
.un
.redisc_fcf_tbl
;
19597 shdr_status
= bf_get(lpfc_mbox_hdr_status
,
19598 &redisc_fcf
->header
.cfg_shdr
.response
);
19599 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
,
19600 &redisc_fcf
->header
.cfg_shdr
.response
);
19601 if (shdr_status
|| shdr_add_status
) {
19602 lpfc_printf_log(phba
, KERN_ERR
, LOG_FIP
,
19603 "2746 Requesting for FCF rediscovery failed "
19604 "status x%x add_status x%x\n",
19605 shdr_status
, shdr_add_status
);
19606 if (phba
->fcf
.fcf_flag
& FCF_ACVL_DISC
) {
19607 spin_lock_irq(&phba
->hbalock
);
19608 phba
->fcf
.fcf_flag
&= ~FCF_ACVL_DISC
;
19609 spin_unlock_irq(&phba
->hbalock
);
19611 * CVL event triggered FCF rediscover request failed,
19612 * last resort to re-try current registered FCF entry.
19614 lpfc_retry_pport_discovery(phba
);
19616 spin_lock_irq(&phba
->hbalock
);
19617 phba
->fcf
.fcf_flag
&= ~FCF_DEAD_DISC
;
19618 spin_unlock_irq(&phba
->hbalock
);
19620 * DEAD FCF event triggered FCF rediscover request
19621 * failed, last resort to fail over as a link down
19622 * to FCF registration.
19624 lpfc_sli4_fcf_dead_failthrough(phba
);
19627 lpfc_printf_log(phba
, KERN_INFO
, LOG_FIP
,
19628 "2775 Start FCF rediscover quiescent timer\n");
19630 * Start FCF rediscovery wait timer for pending FCF
19631 * before rescan FCF record table.
19633 lpfc_fcf_redisc_wait_start_timer(phba
);
19636 mempool_free(mbox
, phba
->mbox_mem_pool
);
/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}
19712 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
19713 * @phba: pointer to lpfc hba data structure.
19714 * @rgn23_data: pointer to configure region 23 data.
19716 * This function gets SLI3 port configure region 23 data through memory dump
19717 * mailbox command. When it successfully retrieves data, the size of the data
19718 * will be returned, otherwise, 0 will be returned.
19721 lpfc_sli_get_config_region23(struct lpfc_hba
*phba
, char *rgn23_data
)
19723 LPFC_MBOXQ_t
*pmb
= NULL
;
19725 uint32_t offset
= 0;
19731 pmb
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
19733 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
19734 "2600 failed to allocate mailbox memory\n");
19740 lpfc_dump_mem(phba
, pmb
, offset
, DMP_REGION_23
);
19741 rc
= lpfc_sli_issue_mbox(phba
, pmb
, MBX_POLL
);
19743 if (rc
!= MBX_SUCCESS
) {
19744 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
19745 "2601 failed to read config "
19746 "region 23, rc 0x%x Status 0x%x\n",
19747 rc
, mb
->mbxStatus
);
19748 mb
->un
.varDmp
.word_cnt
= 0;
19751 * dump mem may return a zero when finished or we got a
19752 * mailbox error, either way we are done.
19754 if (mb
->un
.varDmp
.word_cnt
== 0)
19757 i
= mb
->un
.varDmp
.word_cnt
* sizeof(uint32_t);
19758 if (offset
+ i
> DMP_RGN23_SIZE
)
19759 i
= DMP_RGN23_SIZE
- offset
;
19760 lpfc_sli_pcimem_bcopy(((uint8_t *)mb
) + DMP_RSP_OFFSET
,
19761 rgn23_data
+ offset
, i
);
19763 } while (offset
< DMP_RGN23_SIZE
);
19765 mempool_free(pmb
, phba
->mbox_mem_pool
);
19770 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
19771 * @phba: pointer to lpfc hba data structure.
19772 * @rgn23_data: pointer to configure region 23 data.
19774 * This function gets SLI4 port configure region 23 data through memory dump
19775 * mailbox command. When it successfully retrieves data, the size of the data
19776 * will be returned, otherwise, 0 will be returned.
19779 lpfc_sli4_get_config_region23(struct lpfc_hba
*phba
, char *rgn23_data
)
19781 LPFC_MBOXQ_t
*mboxq
= NULL
;
19782 struct lpfc_dmabuf
*mp
= NULL
;
19783 struct lpfc_mqe
*mqe
;
19784 uint32_t data_length
= 0;
19790 mboxq
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
19792 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
19793 "3105 failed to allocate mailbox memory\n");
19797 if (lpfc_sli4_dump_cfg_rg23(phba
, mboxq
))
19799 mqe
= &mboxq
->u
.mqe
;
19800 mp
= (struct lpfc_dmabuf
*)mboxq
->ctx_buf
;
19801 rc
= lpfc_sli_issue_mbox(phba
, mboxq
, MBX_POLL
);
19804 data_length
= mqe
->un
.mb_words
[5];
19805 if (data_length
== 0)
19807 if (data_length
> DMP_RGN23_SIZE
) {
19811 lpfc_sli_pcimem_bcopy((char *)mp
->virt
, rgn23_data
, data_length
);
19813 mempool_free(mboxq
, phba
->mbox_mem_pool
);
19815 lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
19818 return data_length
;
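/*
 * Illustrative sketch only (not part of the driver): choosing between the
 * SLI-3 and SLI-4 region 23 readers above. The buffer must be at least
 * DMP_RGN23_SIZE bytes, and the IF_TYPE_0 exclusion done by
 * lpfc_sli_read_link_ste() is elided here. The helper name is hypothetical.
 */
static inline uint32_t lpfc_example_read_region23(struct lpfc_hba *phba,
						  char *rgn23_data)
{
	if (phba->sli_rev < LPFC_SLI_REV4)
		return lpfc_sli_get_config_region23(phba, rgn23_data);
	return lpfc_sli4_get_config_region23(phba, rgn23_data);
}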
/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLVs for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
19830 lpfc_sli_read_link_ste(struct lpfc_hba
*phba
)
19832 uint8_t *rgn23_data
= NULL
;
19833 uint32_t if_type
, data_size
, sub_tlv_len
, tlv_offset
;
19834 uint32_t offset
= 0;
19836 /* Get adapter Region 23 data */
19837 rgn23_data
= kzalloc(DMP_RGN23_SIZE
, GFP_KERNEL
);
19841 if (phba
->sli_rev
< LPFC_SLI_REV4
)
19842 data_size
= lpfc_sli_get_config_region23(phba
, rgn23_data
);
19844 if_type
= bf_get(lpfc_sli_intf_if_type
,
19845 &phba
->sli4_hba
.sli_intf
);
19846 if (if_type
== LPFC_SLI_INTF_IF_TYPE_0
)
19848 data_size
= lpfc_sli4_get_config_region23(phba
, rgn23_data
);
19854 /* Check the region signature first */
19855 if (memcmp(&rgn23_data
[offset
], LPFC_REGION23_SIGNATURE
, 4)) {
19856 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
19857 "2619 Config region 23 has bad signature\n");
19862 /* Check the data structure version */
19863 if (rgn23_data
[offset
] != LPFC_REGION23_VERSION
) {
19864 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
19865 "2620 Config region 23 has bad version\n");
19870 /* Parse TLV entries in the region */
19871 while (offset
< data_size
) {
19872 if (rgn23_data
[offset
] == LPFC_REGION23_LAST_REC
)
19875 * If the TLV is not driver specific TLV or driver id is
19876 * not linux driver id, skip the record.
19878 if ((rgn23_data
[offset
] != DRIVER_SPECIFIC_TYPE
) ||
19879 (rgn23_data
[offset
+ 2] != LINUX_DRIVER_ID
) ||
19880 (rgn23_data
[offset
+ 3] != 0)) {
19881 offset
+= rgn23_data
[offset
+ 1] * 4 + 4;
19885 /* Driver found a driver specific TLV in the config region */
19886 sub_tlv_len
= rgn23_data
[offset
+ 1] * 4;
19891 * Search for configured port state sub-TLV.
19893 while ((offset
< data_size
) &&
19894 (tlv_offset
< sub_tlv_len
)) {
19895 if (rgn23_data
[offset
] == LPFC_REGION23_LAST_REC
) {
19900 if (rgn23_data
[offset
] != PORT_STE_TYPE
) {
19901 offset
+= rgn23_data
[offset
+ 1] * 4 + 4;
19902 tlv_offset
+= rgn23_data
[offset
+ 1] * 4 + 4;
19906 /* This HBA contains PORT_STE configured */
19907 if (!rgn23_data
[offset
+ 2])
19908 phba
->hba_flag
|= LINK_DISABLED
;
/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Return 0 if successful; @offset will contain the new offset to use
 * for the next write.
 * Return a negative value for error cases.
 **/
19939 lpfc_wr_object(struct lpfc_hba
*phba
, struct list_head
*dmabuf_list
,
19940 uint32_t size
, uint32_t *offset
)
19942 struct lpfc_mbx_wr_object
*wr_object
;
19943 LPFC_MBOXQ_t
*mbox
;
19945 uint32_t shdr_status
, shdr_add_status
, shdr_change_status
, shdr_csf
;
19947 struct lpfc_dmabuf
*dmabuf
;
19948 uint32_t written
= 0;
19949 bool check_change_status
= false;
19951 mbox
= mempool_alloc(phba
->mbox_mem_pool
, GFP_KERNEL
);
19955 lpfc_sli4_config(phba
, mbox
, LPFC_MBOX_SUBSYSTEM_COMMON
,
19956 LPFC_MBOX_OPCODE_WRITE_OBJECT
,
19957 sizeof(struct lpfc_mbx_wr_object
) -
19958 sizeof(struct lpfc_sli4_cfg_mhdr
), LPFC_SLI4_MBX_EMBED
);
19960 wr_object
= (struct lpfc_mbx_wr_object
*)&mbox
->u
.mqe
.un
.wr_object
;
19961 wr_object
->u
.request
.write_offset
= *offset
;
19962 sprintf((uint8_t *)wr_object
->u
.request
.object_name
, "/");
19963 wr_object
->u
.request
.object_name
[0] =
19964 cpu_to_le32(wr_object
->u
.request
.object_name
[0]);
19965 bf_set(lpfc_wr_object_eof
, &wr_object
->u
.request
, 0);
19966 list_for_each_entry(dmabuf
, dmabuf_list
, list
) {
19967 if (i
>= LPFC_MBX_WR_CONFIG_MAX_BDE
|| written
>= size
)
19969 wr_object
->u
.request
.bde
[i
].addrLow
= putPaddrLow(dmabuf
->phys
);
19970 wr_object
->u
.request
.bde
[i
].addrHigh
=
19971 putPaddrHigh(dmabuf
->phys
);
19972 if (written
+ SLI4_PAGE_SIZE
>= size
) {
19973 wr_object
->u
.request
.bde
[i
].tus
.f
.bdeSize
=
19975 written
+= (size
- written
);
19976 bf_set(lpfc_wr_object_eof
, &wr_object
->u
.request
, 1);
19977 bf_set(lpfc_wr_object_eas
, &wr_object
->u
.request
, 1);
19978 check_change_status
= true;
19980 wr_object
->u
.request
.bde
[i
].tus
.f
.bdeSize
=
19982 written
+= SLI4_PAGE_SIZE
;
19986 wr_object
->u
.request
.bde_count
= i
;
19987 bf_set(lpfc_wr_object_write_length
, &wr_object
->u
.request
, written
);
19988 if (!phba
->sli4_hba
.intr_enable
)
19989 rc
= lpfc_sli_issue_mbox(phba
, mbox
, MBX_POLL
);
19991 mbox_tmo
= lpfc_mbox_tmo_val(phba
, mbox
);
19992 rc
= lpfc_sli_issue_mbox_wait(phba
, mbox
, mbox_tmo
);
19994 /* The IOCTL status is embedded in the mailbox subheader. */
19995 shdr_status
= bf_get(lpfc_mbox_hdr_status
,
19996 &wr_object
->header
.cfg_shdr
.response
);
19997 shdr_add_status
= bf_get(lpfc_mbox_hdr_add_status
,
19998 &wr_object
->header
.cfg_shdr
.response
);
19999 if (check_change_status
) {
20000 shdr_change_status
= bf_get(lpfc_wr_object_change_status
,
20001 &wr_object
->u
.response
);
20003 if (shdr_change_status
== LPFC_CHANGE_STATUS_FW_RESET
||
20004 shdr_change_status
== LPFC_CHANGE_STATUS_PORT_MIGRATION
) {
20005 shdr_csf
= bf_get(lpfc_wr_object_csf
,
20006 &wr_object
->u
.response
);
20008 shdr_change_status
=
20009 LPFC_CHANGE_STATUS_PCI_RESET
;
20012 switch (shdr_change_status
) {
20013 case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET
):
20014 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
20015 "3198 Firmware write complete: System "
20016 "reboot required to instantiate\n");
20018 case (LPFC_CHANGE_STATUS_FW_RESET
):
20019 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
20020 "3199 Firmware write complete: Firmware"
20021 " reset required to instantiate\n");
20023 case (LPFC_CHANGE_STATUS_PORT_MIGRATION
):
20024 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
20025 "3200 Firmware write complete: Port "
20026 "Migration or PCI Reset required to "
20029 case (LPFC_CHANGE_STATUS_PCI_RESET
):
20030 lpfc_printf_log(phba
, KERN_INFO
, LOG_INIT
,
20031 "3201 Firmware write complete: PCI "
20032 "Reset required to instantiate\n");
20038 if (rc
!= MBX_TIMEOUT
)
20039 mempool_free(mbox
, phba
->mbox_mem_pool
);
20040 if (shdr_status
|| shdr_add_status
|| rc
) {
20041 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
20042 "3025 Write Object mailbox failed with "
20043 "status x%x add_status x%x, mbx status x%x\n",
20044 shdr_status
, shdr_add_status
, rc
);
20046 *offset
= shdr_add_status
;
20048 *offset
+= wr_object
->u
.response
.actual_write_length
;
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
20062 lpfc_cleanup_pending_mbox(struct lpfc_vport
*vport
)
20064 struct lpfc_hba
*phba
= vport
->phba
;
20065 LPFC_MBOXQ_t
*mb
, *nextmb
;
20066 struct lpfc_dmabuf
*mp
;
20067 struct lpfc_nodelist
*ndlp
;
20068 struct lpfc_nodelist
*act_mbx_ndlp
= NULL
;
20069 LIST_HEAD(mbox_cmd_list
);
20070 uint8_t restart_loop
;
20072 /* Clean up internally queued mailbox commands with the vport */
20073 spin_lock_irq(&phba
->hbalock
);
20074 list_for_each_entry_safe(mb
, nextmb
, &phba
->sli
.mboxq
, list
) {
20075 if (mb
->vport
!= vport
)
20078 if ((mb
->u
.mb
.mbxCommand
!= MBX_REG_LOGIN64
) &&
20079 (mb
->u
.mb
.mbxCommand
!= MBX_REG_VPI
))
20082 list_del(&mb
->list
);
20083 list_add_tail(&mb
->list
, &mbox_cmd_list
);
20085 /* Clean up active mailbox command with the vport */
20086 mb
= phba
->sli
.mbox_active
;
20087 if (mb
&& (mb
->vport
== vport
)) {
20088 if ((mb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) ||
20089 (mb
->u
.mb
.mbxCommand
== MBX_REG_VPI
))
20090 mb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
20091 if (mb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) {
20092 act_mbx_ndlp
= (struct lpfc_nodelist
*)mb
->ctx_ndlp
;
20093 /* Put reference count for delayed processing */
20094 act_mbx_ndlp
= lpfc_nlp_get(act_mbx_ndlp
);
20095 /* Unregister the RPI when mailbox complete */
20096 mb
->mbox_flag
|= LPFC_MBX_IMED_UNREG
;
20099 /* Cleanup any mailbox completions which are not yet processed */
20102 list_for_each_entry(mb
, &phba
->sli
.mboxq_cmpl
, list
) {
* If this mailbox is already processed or it is
* for another vport, ignore it.
20107 if ((mb
->vport
!= vport
) ||
20108 (mb
->mbox_flag
& LPFC_MBX_IMED_UNREG
))
20111 if ((mb
->u
.mb
.mbxCommand
!= MBX_REG_LOGIN64
) &&
20112 (mb
->u
.mb
.mbxCommand
!= MBX_REG_VPI
))
20115 mb
->mbox_cmpl
= lpfc_sli_def_mbox_cmpl
;
20116 if (mb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) {
20117 ndlp
= (struct lpfc_nodelist
*)mb
->ctx_ndlp
;
20118 /* Unregister the RPI when mailbox complete */
20119 mb
->mbox_flag
|= LPFC_MBX_IMED_UNREG
;
20121 spin_unlock_irq(&phba
->hbalock
);
20122 spin_lock(&ndlp
->lock
);
20123 ndlp
->nlp_flag
&= ~NLP_IGNR_REG_CMPL
;
20124 spin_unlock(&ndlp
->lock
);
20125 spin_lock_irq(&phba
->hbalock
);
20129 } while (restart_loop
);
20131 spin_unlock_irq(&phba
->hbalock
);
20133 /* Release the cleaned-up mailbox commands */
20134 while (!list_empty(&mbox_cmd_list
)) {
20135 list_remove_head(&mbox_cmd_list
, mb
, LPFC_MBOXQ_t
, list
);
20136 if (mb
->u
.mb
.mbxCommand
== MBX_REG_LOGIN64
) {
20137 mp
= (struct lpfc_dmabuf
*)(mb
->ctx_buf
);
20139 __lpfc_mbuf_free(phba
, mp
->virt
, mp
->phys
);
20142 mb
->ctx_buf
= NULL
;
20143 ndlp
= (struct lpfc_nodelist
*)mb
->ctx_ndlp
;
20144 mb
->ctx_ndlp
= NULL
;
20146 spin_lock(&ndlp
->lock
);
20147 ndlp
->nlp_flag
&= ~NLP_IGNR_REG_CMPL
;
20148 spin_unlock(&ndlp
->lock
);
20149 lpfc_nlp_put(ndlp
);
20152 mempool_free(mb
, phba
->mbox_mem_pool
);
20155 /* Release the ndlp with the cleaned-up active mailbox command */
20156 if (act_mbx_ndlp
) {
20157 spin_lock(&act_mbx_ndlp
->lock
);
20158 act_mbx_ndlp
->nlp_flag
&= ~NLP_IGNR_REG_CMPL
;
20159 spin_unlock(&act_mbx_ndlp
->lock
);
20160 lpfc_nlp_put(act_mbx_ndlp
);
/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/
20176 lpfc_drain_txq(struct lpfc_hba
*phba
)
20178 LIST_HEAD(completions
);
20179 struct lpfc_sli_ring
*pring
;
20180 struct lpfc_iocbq
*piocbq
= NULL
;
20181 unsigned long iflags
= 0;
20182 char *fail_msg
= NULL
;
20183 struct lpfc_sglq
*sglq
;
20184 union lpfc_wqe128 wqe
;
20185 uint32_t txq_cnt
= 0;
20186 struct lpfc_queue
*wq
;
20188 if (phba
->link_flag
& LS_MDS_LOOPBACK
) {
20189 /* MDS WQE are posted only to first WQ*/
20190 wq
= phba
->sli4_hba
.hdwq
[0].io_wq
;
20195 wq
= phba
->sli4_hba
.els_wq
;
20198 pring
= lpfc_phba_elsring(phba
);
20201 if (unlikely(!pring
) || list_empty(&pring
->txq
))
20204 spin_lock_irqsave(&pring
->ring_lock
, iflags
);
20205 list_for_each_entry(piocbq
, &pring
->txq
, list
) {
20209 if (txq_cnt
> pring
->txq_max
)
20210 pring
->txq_max
= txq_cnt
;
20212 spin_unlock_irqrestore(&pring
->ring_lock
, iflags
);
20214 while (!list_empty(&pring
->txq
)) {
20215 spin_lock_irqsave(&pring
->ring_lock
, iflags
);
20217 piocbq
= lpfc_sli_ringtx_get(phba
, pring
);
20219 spin_unlock_irqrestore(&pring
->ring_lock
, iflags
);
20220 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
20221 "2823 txq empty and txq_cnt is %d\n ",
20225 sglq
= __lpfc_sli_get_els_sglq(phba
, piocbq
);
20227 __lpfc_sli_ringtx_put(phba
, pring
, piocbq
);
20228 spin_unlock_irqrestore(&pring
->ring_lock
, iflags
);
20233 /* The xri and iocb resources secured,
20234 * attempt to issue request
20236 piocbq
->sli4_lxritag
= sglq
->sli4_lxritag
;
20237 piocbq
->sli4_xritag
= sglq
->sli4_xritag
;
20238 if (NO_XRI
== lpfc_sli4_bpl2sgl(phba
, piocbq
, sglq
))
20239 fail_msg
= "to convert bpl to sgl";
20240 else if (lpfc_sli4_iocb2wqe(phba
, piocbq
, &wqe
))
20241 fail_msg
= "to convert iocb to wqe";
20242 else if (lpfc_sli4_wq_put(wq
, &wqe
))
20243 fail_msg
= " - Wq is full";
20245 lpfc_sli_ringtxcmpl_put(phba
, pring
, piocbq
);
20248 /* Failed means we can't issue and need to cancel */
20249 lpfc_printf_log(phba
, KERN_ERR
, LOG_TRACE_EVENT
,
20250 "2822 IOCB failed %s iotag 0x%x "
20253 piocbq
->iotag
, piocbq
->sli4_xritag
);
20254 list_add_tail(&piocbq
->list
, &completions
);
20256 spin_unlock_irqrestore(&pring
->ring_lock
, iflags
);
20259 /* Cancel all the IOCBs that cannot be issued */
20260 lpfc_sli_cancel_iocbs(phba
, &completions
, IOSTAT_LOCAL_REJECT
,
20261 IOERR_SLI_ABORTED
);

/**
 * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @pwqeq: Pointer to command WQE.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the WQE
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the WQE contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the WQE contains a single
 * BDE then it is converted to a single sli_sge.
 * The WQE is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
		 struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl  = NULL;
	struct lpfc_dmabuf *dmabuf;
	union lpfc_wqe128 *wqe;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */
	uint32_t cmd;

	if (!pwqeq || !sglq)
		return xritag;

	sgl  = (struct sli4_sge *)sglq->sgl;
	wqe = &pwqeq->wqe;
	pwqeq->iocb.ulpIoTag = pwqeq->iotag;

	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
		return sglq->sli4_xritag;
	numBdes = pwqeq->rsvd2;
	if (numBdes) {
		/* The addrHigh and addrLow fields within the WQE
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (pwqeq->context3)
			dmabuf = (struct lpfc_dmabuf *)pwqeq->context3;
		else
			return xritag;

		bpl  = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i + 1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			switch (cmd) {
			case CMD_GEN_REQUEST64_WQE:
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
				break;
			case CMD_FCP_TRSP64_WQE:
				bf_set(lpfc_sli4_sge_offset, sgl, 0);
				bf_set(lpfc_sli4_sge_type, sgl,
				       LPFC_SGE_TYPE_DATA);
				break;
			case CMD_FCP_TSEND64_WQE:
			case CMD_FCP_TRECEIVE64_WQE:
				bf_set(lpfc_sli4_sge_type, sgl,
				       bpl->tus.f.bdeFlags);
				if (i < 3)
					offset = 0;
				else
					offset += bde.tus.f.bdeSize;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				break;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
	}
	return sglq->sli4_xritag;
}
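
/*
 * Illustrative sketch (editorial, not part of the driver build): for the
 * single-BDE case above, the translation from a ulp_bde64 embedded in the WQE
 * to a sli4_sge reduces to copying the address pair with a byte swap to little
 * endian, marking the SGE as the last entry, and carrying the length across.
 * The names mirror structures used in this file; the helper itself is
 * hypothetical.
 *
 *	static void example_bde_to_sge(const struct ulp_bde64 *bde,
 *				       struct sli4_sge *sge)
 *	{
 *		sge->addr_hi = cpu_to_le32(bde->addrHigh);
 *		sge->addr_lo = cpu_to_le32(bde->addrLow);
 *		sge->word2 = le32_to_cpu(sge->word2);
 *		bf_set(lpfc_sli4_sge_last, sge, 1);
 *		sge->word2 = cpu_to_le32(sge->word2);
 *		sge->sge_len = cpu_to_le32(bde->tus.f.bdeSize);
 *	}
 */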

/**
 * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
 * @phba: Pointer to HBA context object.
 * @qp: Pointer to HDW queue.
 * @pwqe: Pointer to command WQE.
 **/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
		    struct lpfc_iocbq *pwqe)
{
	union lpfc_wqe128 *wqe = &pwqe->wqe;
	struct lpfc_async_xchg_ctx *ctxp;
	struct lpfc_queue *wq;
	struct lpfc_sglq *sglq;
	struct lpfc_sli_ring *pring;
	unsigned long iflags;
	int ret;

	/* NVME_LS and NVME_LS ABTS requests. */
	if (pwqe->iocb_flag & LPFC_IO_NVME_LS) {
		pring =  phba->sli4_hba.nvmels_wq->pring;
		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
		if (!sglq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_BUSY;
		}
		pwqe->sli4_lxritag = sglq->sli4_lxritag;
		pwqe->sli4_xritag = sglq->sli4_xritag;
		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return WQE_ERROR;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}

		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}

	/* NVME_FCREQ and NVME_ABTS requests */
	if (pwqe->iocb_flag & LPFC_IO_NVME ||
	    pwqe->iocb_flag & LPFC_IO_FCP) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->io_wq;
		pring = wq->pring;

		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}

	/* NVMET requests */
	if (pwqe->iocb_flag & LPFC_IO_NVMET) {
		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
		wq = qp->io_wq;
		pring = wq->pring;

		ctxp = pwqe->context2;
		sglq = ctxp->ctxbuf->sglq;
		if (pwqe->sli4_xritag ==  NO_XRI) {
			pwqe->sli4_lxritag = sglq->sli4_lxritag;
			pwqe->sli4_xritag = sglq->sli4_xritag;
		}
		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
		       pwqe->sli4_xritag);
		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);

		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
					  qp, wq_access);
		ret = lpfc_sli4_wq_put(wq, wqe);
		if (ret) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			return ret;
		}
		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
		return 0;
	}
	return WQE_ERROR;
}
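
/*
 * Editorial summary of the dispatch above, derived from the code and kept
 * here only for quick reference:
 *
 *	iocb_flag class              work queue            ring lock taken
 *	---------------------------  --------------------  --------------------
 *	LPFC_IO_NVME_LS (and ABTS)   sli4_hba.nvmels_wq    nvmels_wq->pring
 *	LPFC_IO_NVME / LPFC_IO_FCP   qp->io_wq             io_wq->pring
 *	LPFC_IO_NVMET                qp->io_wq             io_wq->pring
 *
 * Every successful path ends with lpfc_sli_ringtxcmpl_put() followed by
 * lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH).
 */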

/**
 * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @cmpl: completion function.
 *
 * Fill the appropriate fields for the abort WQE and call
 * internal routine lpfc_sli4_issue_wqe to send the WQE
 * This function is called with hbalock held and no ring_lock held.
 *
 * RETURNS 0 - SUCCESS
 **/
int
lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			    void *cmpl)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocb = NULL;
	union lpfc_wqe128 *abtswqe;
	struct lpfc_io_buf *lpfc_cmd;
	int retval = IOCB_ERROR;
	u16 xritag = cmdiocb->sli4_xritag;

	/*
	 * The scsi command can not be in txq and it is in flight because the
	 * pCmd is still pointing at the SCSI command we have to abort. There
	 * is no need to search the txcmplq. Just send an abort to the FW.
	 */

	abtsiocb = __lpfc_sli_get_iocbq(phba);
	if (!abtsiocb)
		return WQE_NORESOURCE;

	/* Indicate the IO is being aborted by the driver. */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	abtswqe = &abtsiocb->wqe;
	memset(abtswqe, 0, sizeof(*abtswqe));

	if (lpfc_is_link_up(phba))
		bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
	else
		bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0);
	bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
	abtswqe->abort_cmd.rsrvd5 = 0;
	abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
	bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
	bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
	bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
	bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
		abtsiocb->iocb_flag |= LPFC_IO_FCP;
	if (cmdiocb->iocb_flag & LPFC_IO_NVME)
		abtsiocb->iocb_flag |= LPFC_IO_NVME;
	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
		abtsiocb->iocb_flag |= LPFC_IO_FOF;
	abtsiocb->vport = vport;
	abtsiocb->wqe_cmpl = cmpl;

	lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
	retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
			 "0359 Abort xri x%x, original iotag x%x, "
			 "abort cmd iotag x%x retval x%x\n",
			 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);

	if (retval) {
		cmdiocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		__lpfc_sli_release_iocbq(phba, abtsiocb);
	}

	return retval;
}
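
/*
 * Illustrative caller sketch (editorial, not part of the driver): the routine
 * above expects the hbalock to be held and a WQE completion handler to be
 * supplied. A hypothetical abort path might look like the following, where
 * example_abort_cmpl is an assumed handler with the driver's wqe_cmpl shape
 * and "flags" is a local unsigned long.
 *
 *	spin_lock_irqsave(&phba->hbalock, flags);
 *	ret = lpfc_sli4_issue_abort_iotag(phba, &lpfc_cmd->cur_iocbq,
 *					  example_abort_cmpl);
 *	spin_unlock_irqrestore(&phba->hbalock, flags);
 */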

#ifdef LPFC_MXP_STAT
/**
 * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
 * 15 seconds after a test case is started.
 *
 * The user should call lpfc_debugfs_multixripools_write before running a test
 * case to clear stat_snapshot_taken. Then the user starts a test case. While
 * the test case is running, stat_snapshot_taken is incremented by 1 every time
 * this routine is called from the heartbeat timer. When stat_snapshot_taken is
 * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
 **/
void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	u32 txcmplq_cnt;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;

	if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
		pvt_pool = &qp->p_multixri_pool->pvt_pool;
		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;

		multixri_pool->stat_pbl_count = pbl_pool->count;
		multixri_pool->stat_pvt_count = pvt_pool->count;
		multixri_pool->stat_busy_count = txcmplq_cnt;
	}

	multixri_pool->stat_snapshot_taken++;
}
#endif

/**
 * lpfc_adjust_pvt_pool_count - Adjust private pool count
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine moves some XRIs from private to public pool when the private
 * pool is not busy.
 **/
void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	u32 io_req_count;
	u32 prev_io_req_count;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	if (!multixri_pool)
		return;

	io_req_count = multixri_pool->io_req_count;
	prev_io_req_count = multixri_pool->prev_io_req_count;

	if (prev_io_req_count != io_req_count) {
		/* Private pool is busy */
		multixri_pool->prev_io_req_count = io_req_count;
	} else {
		/* Private pool is not busy.
		 * Move XRIs from private to public pool.
		 */
		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
	}
}

/**
 * lpfc_adjust_high_watermark - Adjust high watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine sets the high watermark to the number of outstanding XRIs,
 * but makes sure the new value is between xri_limit/2 and xri_limit.
 **/
void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
{
	u32 new_watermark;
	u32 watermark_max;
	u32 watermark_min;
	u32 xri_limit;
	u32 txcmplq_cnt;
	u32 abts_io_bufs;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_sli4_hdw_queue *qp;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	if (!multixri_pool)
		return;
	xri_limit = multixri_pool->xri_limit;

	watermark_max = xri_limit;
	watermark_min = xri_limit / 2;

	txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
	abts_io_bufs = qp->abts_scsi_io_bufs;
	abts_io_bufs += qp->abts_nvme_io_bufs;

	new_watermark = txcmplq_cnt + abts_io_bufs;
	new_watermark = min(watermark_max, new_watermark);
	new_watermark = max(watermark_min, new_watermark);
	multixri_pool->pvt_pool.high_watermark = new_watermark;

#ifdef LPFC_MXP_STAT
	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
					  new_watermark);
#endif
}
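
/*
 * Worked example (editorial): with an assumed xri_limit of 512, the clamp
 * above keeps the high watermark in [256, 512]. If txcmplq_cnt = 100 and
 * abts_io_bufs = 20, the outstanding count is 120, so the watermark is raised
 * to the floor of 256; with 600 outstanding it would be capped at 512.
 */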

/**
 * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine is called from the heartbeat timer when pvt_pool is idle.
 * All free XRIs are moved from private to public pool on hwqid with 2 steps.
 * The first step moves (all - low_watermark) amount of XRIs.
 * The second step moves the rest of XRIs.
 **/
void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct list_head tmp_list;
	u32 tmp_count;

	qp = &phba->sli4_hba.hdwq[hwqid];
	pbl_pool = &qp->p_multixri_pool->pbl_pool;
	pvt_pool = &qp->p_multixri_pool->pvt_pool;
	tmp_count = 0;

	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);

	if (pvt_pool->count > pvt_pool->low_watermark) {
		/* Step 1: move (all - low_watermark) from pvt_pool
		 * to pbl_pool
		 */

		/* Move low watermark of bufs from pvt_pool to tmp_list */
		INIT_LIST_HEAD(&tmp_list);
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &pvt_pool->list, list) {
			list_move_tail(&lpfc_ncmd->list, &tmp_list);
			tmp_count++;
			if (tmp_count >= pvt_pool->low_watermark)
				break;
		}

		/* Move all bufs from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);

		/* Move all bufs from tmp_list to pvt_pool */
		list_splice(&tmp_list, &pvt_pool->list);

		pbl_pool->count += (pvt_pool->count - tmp_count);
		pvt_pool->count = tmp_count;
	} else {
		/* Step 2: move the rest from pvt_pool to pbl_pool */
		list_splice_init(&pvt_pool->list, &pbl_pool->list);
		pbl_pool->count += pvt_pool->count;
		pvt_pool->count = 0;
	}

	spin_unlock(&pvt_pool->lock);
	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
}

/**
 * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure
 * @qp: pointer to HDW queue
 * @pbl_pool: specified public free XRI pool
 * @pvt_pool: specified private free XRI pool
 * @count: number of XRIs to move
 *
 * This routine tries to move some free common bufs from the specified pbl_pool
 * to the specified pvt_pool. It might move less than count XRIs if there's not
 * enough in public pool.
 *
 * Return:
 *   true - if XRIs are successfully moved from the specified pbl_pool to the
 *          specified pvt_pool
 *   false - if the specified pbl_pool is empty or locked by someone else
 **/
static bool
_lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
			  struct lpfc_pbl_pool *pbl_pool,
			  struct lpfc_pvt_pool *pvt_pool, u32 count)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	int ret;

	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
	if (ret) {
		if (pbl_pool->count) {
			/* Move a batch of XRIs from public to private pool */
			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
			list_for_each_entry_safe(lpfc_ncmd,
						 lpfc_ncmd_next,
						 &pbl_pool->list,
						 list) {
				list_move_tail(&lpfc_ncmd->list,
					       &pvt_pool->list);
				pvt_pool->count++;
				pbl_pool->count--;
				count--;
				if (count == 0)
					break;
			}

			spin_unlock(&pvt_pool->lock);
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
			return true;
		}
		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
	}

	return false;
}

/**
 * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 * @count: number of XRIs to move
 *
 * This routine tries to find some free common bufs in one of public pools with
 * Round Robin method. The search always starts from local hwqid, then the next
 * HWQ which was found last time (rrb_next_hwqid). Once a public pool is found,
 * a batch of free common bufs are moved to private pool on hwqid.
 * It might move less than count XRIs if there's not enough in public pool.
 **/
void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_multixri_pool *next_multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_sli4_hdw_queue *qp;
	u32 next_hwqid;
	u32 hwq_count;
	int ret;

	qp = &phba->sli4_hba.hdwq[hwqid];
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	pbl_pool = &multixri_pool->pbl_pool;

	/* Check if local pbl_pool is available */
	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
	if (ret) {
#ifdef LPFC_MXP_STAT
		multixri_pool->local_pbl_hit_count++;
#endif
		return;
	}

	hwq_count = phba->cfg_hdw_queue;

	/* Get the next hwqid which was found last time */
	next_hwqid = multixri_pool->rrb_next_hwqid;

	do {
		/* Go to next hwq */
		next_hwqid = (next_hwqid + 1) % hwq_count;

		next_multixri_pool =
			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
		pbl_pool = &next_multixri_pool->pbl_pool;

		/* Check if the public free xri pool is available */
		ret = _lpfc_move_xri_pbl_to_pvt(
			phba, qp, pbl_pool, pvt_pool, count);

		/* Exit while-loop if success or all hwqid are checked */
	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);

	/* Starting point for the next time */
	multixri_pool->rrb_next_hwqid = next_hwqid;

	if (!ret) {
		/* stats: all public pools are empty*/
		multixri_pool->pbl_empty_count++;
	}

#ifdef LPFC_MXP_STAT
	if (ret) {
		if (next_hwqid == hwqid)
			multixri_pool->local_pbl_hit_count++;
		else
			multixri_pool->other_pbl_hit_count++;
	}
#endif
}
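
/*
 * Editorial example of the round-robin scan above: with cfg_hdw_queue = 4 and
 * rrb_next_hwqid previously left at 2, the do/while visits public pools in the
 * order 3, 0, 1, 2 and stops as soon as _lpfc_move_xri_pbl_to_pvt() succeeds;
 * rrb_next_hwqid is then updated to the hwq that satisfied (or ended) the
 * search, so the next invocation starts from a different neighbour.
 */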

/**
 * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
 * @phba: pointer to lpfc hba data structure.
 * @hwqid: belong to which HWQ.
 *
 * This routine gets a batch of XRIs from pbl_pool if pvt_pool is less than
 * the low watermark.
 **/
void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
{
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;

	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;

	if (pvt_pool->count < pvt_pool->low_watermark)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
}

/**
 * lpfc_release_io_buf - Return one IO buf back to free pool
 * @phba: pointer to lpfc hba data structure.
 * @lpfc_ncmd: IO buf to be returned.
 * @qp: belong to which HWQ.
 *
 * This routine returns one IO buf back to free pool. If this is an urgent IO,
 * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
 * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
 * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
 * lpfc_io_buf_list_put.
 **/
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
			 struct lpfc_sli4_hdw_queue *qp)
{
	unsigned long iflag;
	struct lpfc_pbl_pool *pbl_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_epd_pool *epd_pool;
	u32 txcmplq_cnt;
	u32 xri_owned;
	u32 xri_limit;
	u32 abts_io_bufs;

	/* MUST zero fields if buffer is reused by another protocol */
	lpfc_ncmd->nvmeCmd = NULL;
	lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
	lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;

	if (phba->cfg_xpsgl && !phba->nvmet_support &&
	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);

	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);

	if (phba->cfg_xri_rebalancing) {
		if (lpfc_ncmd->expedite) {
			/* Return to expedite pool */
			epd_pool = &phba->epd_pool;
			spin_lock_irqsave(&epd_pool->lock, iflag);
			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
			epd_pool->count++;
			spin_unlock_irqrestore(&epd_pool->lock, iflag);
			return;
		}

		/* Avoid invalid access if an IO sneaks in and is being rejected
		 * just _after_ xri pools are destroyed in lpfc_offline.
		 * Nothing much can be done at this point.
		 */
		if (!qp->p_multixri_pool)
			return;

		pbl_pool = &qp->p_multixri_pool->pbl_pool;
		pvt_pool = &qp->p_multixri_pool->pvt_pool;

		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
		abts_io_bufs = qp->abts_scsi_io_bufs;
		abts_io_bufs += qp->abts_nvme_io_bufs;

		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
		xri_limit = qp->p_multixri_pool->xri_limit;

#ifdef LPFC_MXP_STAT
		if (xri_owned <= xri_limit)
			qp->p_multixri_pool->below_limit_count++;
		else
			qp->p_multixri_pool->above_limit_count++;
#endif

		/* XRI goes to either public or private free xri pool
		 *     based on watermark and xri_limit
		 */
		if ((pvt_pool->count < pvt_pool->low_watermark) ||
		    (xri_owned < xri_limit &&
		     pvt_pool->count < pvt_pool->high_watermark)) {
			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
						  qp, free_pvt_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pvt_pool->list);
			pvt_pool->count++;
			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		} else {
			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
						  qp, free_pub_pool);
			list_add_tail(&lpfc_ncmd->list,
				      &pbl_pool->list);
			pbl_pool->count++;
			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
		}
	} else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
					  qp, free_xri);
		list_add_tail(&lpfc_ncmd->list,
			      &qp->lpfc_io_buf_list_put);
		qp->put_io_bufs++;
		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
				       iflag);
	}
}
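
/*
 * Editorial summary of the release policy above:
 *
 *	expedite buffer                        -> phba->epd_pool
 *	pvt_pool->count < low_watermark        -> pvt_pool
 *	xri_owned < xri_limit and
 *	pvt_pool->count < high_watermark       -> pvt_pool
 *	otherwise (rebalancing enabled)        -> pbl_pool
 *	cfg_xri_rebalancing == 0               -> qp->lpfc_io_buf_list_put
 */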

/**
 * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
 * @phba: pointer to lpfc hba data structure.
 * @qp: pointer to HDW queue
 * @pvt_pool: pointer to private pool data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 *
 * This routine tries to get one free IO buf from private pool.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
				  struct lpfc_sli4_hdw_queue *qp,
				  struct lpfc_pvt_pool *pvt_pool,
				  struct lpfc_nodelist *ndlp)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;

	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &pvt_pool->list, list) {
		if (lpfc_test_rrq_active(
			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
			continue;
		list_del(&lpfc_ncmd->list);
		pvt_pool->count--;
		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
		return lpfc_ncmd;
	}
	spin_unlock_irqrestore(&pvt_pool->lock, iflag);

	return NULL;
}

/**
 * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine tries to get one free IO buf from expedite pool.
 *
 * Return:
 *   pointer to one free IO buf - if expedite pool is not empty
 *   NULL - if expedite pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
	struct lpfc_io_buf *lpfc_ncmd;
	struct lpfc_io_buf *lpfc_ncmd_next;
	unsigned long iflag;
	struct lpfc_epd_pool *epd_pool;

	epd_pool = &phba->epd_pool;
	lpfc_ncmd = NULL;

	spin_lock_irqsave(&epd_pool->lock, iflag);
	if (epd_pool->count > 0) {
		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
					 &epd_pool->list, list) {
			list_del(&lpfc_ncmd->list);
			epd_pool->count--;
			break;
		}
	}
	spin_unlock_irqrestore(&epd_pool->lock, iflag);

	return lpfc_ncmd;
}

/**
 * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
 * @phba: pointer to lpfc hba data structure.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine will do the following actions and then return a pointer to
 * one free IO buf.
 *
 * 1. If private free xri count is empty, move some XRIs from public to
 *    private pool.
 * 2. Get one XRI from private free xri pool.
 * 3. If we fail to get one from pvt_pool and this is an expedite request,
 *    get one free xri from expedite pool.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   pointer to one free IO buf - if private pool is not empty
 *   NULL - if private pool is empty
 **/
static struct lpfc_io_buf *
lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    int hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_io_buf *lpfc_ncmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_ncmd = NULL;
	multixri_pool = qp->p_multixri_pool;
	pvt_pool = &multixri_pool->pvt_pool;
	multixri_pool->io_req_count++;

	/* If pvt_pool is empty, move some XRIs from public to private pool */
	if (pvt_pool->count == 0)
		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);

	/* Get one XRI from private free xri pool */
	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);

	if (lpfc_ncmd) {
		lpfc_ncmd->hdwq = qp;
		lpfc_ncmd->hdwq_no = hwqid;
	} else if (expedite) {
		/* If we fail to get one from pvt_pool and this is an expedite
		 * request, get one free xri from expedite pool.
		 */
		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
	}

	return lpfc_ncmd;
}

static inline struct lpfc_io_buf *
lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
{
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;

	qp = &phba->sli4_hba.hdwq[idx];
	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
				 &qp->lpfc_io_buf_list_get, list) {
		if (lpfc_test_rrq_active(phba, ndlp,
					 lpfc_cmd->cur_iocbq.sli4_lxritag))
			continue;

		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
			continue;

		list_del_init(&lpfc_cmd->list);
		qp->get_io_bufs--;
		lpfc_cmd->hdwq = qp;
		lpfc_cmd->hdwq_no = idx;
		return lpfc_cmd;
	}
	return NULL;
}

/**
 * lpfc_get_io_buf - Get one IO buffer from free pool
 * @phba: The HBA for which this call is being executed.
 * @ndlp: pointer to lpfc nodelist data structure.
 * @hwqid: belong to which HWQ
 * @expedite: 1 means this request is urgent.
 *
 * This routine gets one IO buffer from free pool. If cfg_xri_rebalancing==1,
 * removes an IO buffer from multiXRI pools. If cfg_xri_rebalancing==0, removes
 * an IO buffer from head of @hwqid io_buf_list and returns it to the caller.
 *
 * Note: ndlp is only used on SCSI side for RRQ testing.
 *       The caller should pass NULL for ndlp on NVME side.
 *
 * Return:
 *   NULL - Error
 *   Pointer to lpfc_io_buf - Success
 **/
struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
				    struct lpfc_nodelist *ndlp,
				    u32 hwqid, int expedite)
{
	struct lpfc_sli4_hdw_queue *qp;
	unsigned long iflag;
	struct lpfc_io_buf *lpfc_cmd;

	qp = &phba->sli4_hba.hdwq[hwqid];
	lpfc_cmd = NULL;

	if (phba->cfg_xri_rebalancing)
		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
			phba, ndlp, hwqid, expedite);
	else {
		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
					  qp, alloc_xri_get);
		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		if (!lpfc_cmd) {
			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
					  qp, alloc_xri_put);
			list_splice(&qp->lpfc_io_buf_list_put,
				    &qp->lpfc_io_buf_list_get);
			qp->get_io_bufs += qp->put_io_bufs;
			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
			qp->put_io_bufs = 0;
			spin_unlock(&qp->io_buf_list_put_lock);
			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
			    expedite)
				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
		}
		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
	}

	return lpfc_cmd;
}
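
/*
 * Illustrative usage sketch (editorial, not part of the driver): a typical
 * issue path pairs lpfc_get_io_buf() with lpfc_release_io_buf() on the same
 * hardware queue. The hwqid selection and the error value shown here are
 * assumptions for illustration only.
 *
 *	struct lpfc_io_buf *iobuf;
 *
 *	iobuf = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!iobuf)
 *		return -EBUSY;	(hypothetical caller-specific handling)
 *	... build and issue the WQE via iobuf->cur_iocbq ...
 *	On completion, or on failure to issue:
 *	lpfc_release_io_buf(phba, iobuf, iobuf->hdwq);
 */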

/**
 * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to append the SGL chunk
 *
 * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
 * and will allocate an SGL chunk if the pool is empty.
 *
 * Return:
 *   NULL - Error
 *   Pointer to sli4_hybrid_sgl - Success
 **/
struct sli4_hybrid_sgl *
lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct sli4_hybrid_sgl *allocated_sgl = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the sgl_list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list, list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_sgl_xtra_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8353 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
					      GFP_ATOMIC, &tmp->dma_phys_sgl);
		if (!tmp->dma_sgl) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8354 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
	}

	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
					struct sli4_hybrid_sgl,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_sgl;
}
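
/*
 * Illustrative sketch (editorial): callers that need extra SGE space for one
 * IO append chunks with lpfc_get_sgl_per_hdwq() and return them in bulk with
 * lpfc_put_sgl_per_hdwq(); lpfc_release_io_buf() above already performs the
 * put when dma_sgl_xtra_list is non-empty. The error value shown is an
 * assumption for illustration.
 *
 *	struct sli4_hybrid_sgl *xtra;
 *
 *	xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);
 *	if (!xtra)
 *		return 1;	(assumed caller-specific error value)
 *	... use xtra->dma_sgl / xtra->dma_phys_sgl for the additional SGEs ...
 */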

/**
 * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the SGL chunk
 *
 * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
 *
 * Return:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->sgl_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_sgl_xtra_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

/**
 * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup sgl buff resources on
 *
 * This routine frees all SGL chunks of hdwq SGL chunk pool.
 *
 * Return:
 *   None
 **/
void
lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
		       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->sgl_list;
	struct sli4_hybrid_sgl *list_entry = NULL;
	struct sli4_hybrid_sgl *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free sgl pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list, list_node) {
		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
			      list_entry->dma_sgl,
			      list_entry->dma_phys_sgl);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}

/**
 * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
 *
 * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
 * and will allocate a CMD/RSP buffer if the pool is empty.
 *
 * Return:
 *   NULL - Error
 *   Pointer to fcp_cmd_rsp_buf - Success
 **/
struct fcp_cmd_rsp_buf *
lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(buf_list))) {
		/* break off 1 chunk from the list */
		list_for_each_entry_safe(list_entry, tmp,
					 buf_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       &lpfc_buf->dma_cmd_rsp_list);
			break;
		}
	} else {
		/* allocate more */
		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
				   cpu_to_node(hdwq->io_wq->chann));
		if (!tmp) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8355 error kmalloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			return NULL;
		}

		tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool,
					       GFP_ATOMIC,
					       &tmp->fcp_cmd_rsp_dma_handle);

		if (!tmp->fcp_cmnd) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"8356 error pool_alloc memory for HDWQ "
					"%d %s\n",
					lpfc_buf->hdwq_no, __func__);
			kfree(tmp);
			return NULL;
		}

		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
				sizeof(struct fcp_cmnd));

		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
	}

	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
					struct fcp_cmd_rsp_buf,
					list_node);

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);

	return allocated_buf;
}
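
/*
 * Editorial note on the allocation above: each chunk taken from
 * lpfc_cmd_rsp_buf_pool holds the FCP_CMND IU immediately followed by the
 * FCP_RSP IU in a single DMA-coherent buffer, i.e.
 *
 *	tmp->fcp_cmnd                  struct fcp_cmnd, at offset 0
 *	tmp->fcp_rsp                   struct fcp_rsp, right behind fcp_cmnd
 *	tmp->fcp_cmd_rsp_dma_handle    DMA address of fcp_cmnd
 *
 * so a single dma_pool_free() in lpfc_free_cmd_rsp_buf_per_hdwq() releases
 * both IUs.
 */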

/**
 * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
 * @phba: The HBA for which this call is being executed.
 * @lpfc_buf: IO buf structure with the CMD/RSP buf
 *
 * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
 *
 * Return:
 *   0 - Success
 *   -EINVAL - Error
 **/
int
lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			      struct lpfc_io_buf *lpfc_buf)
{
	int rc = 0;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
		list_for_each_entry_safe(list_entry, tmp,
					 &lpfc_buf->dma_cmd_rsp_list,
					 list_node) {
			list_move_tail(&list_entry->list_node,
				       buf_list);
		}
	} else {
		rc = -EINVAL;
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
	return rc;
}

/**
 * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
 * @phba: phba object
 * @hdwq: hdwq to cleanup cmd rsp buff resources on
 *
 * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
 *
 * Return:
 *   None
 **/
void
lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
			       struct lpfc_sli4_hdw_queue *hdwq)
{
	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
	struct fcp_cmd_rsp_buf *list_entry = NULL;
	struct fcp_cmd_rsp_buf *tmp = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);

	/* Free cmd_rsp buf pool */
	list_for_each_entry_safe(list_entry, tmp,
				 buf_list,
				 list_node) {
		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
			      list_entry->fcp_cmnd,
			      list_entry->fcp_cmd_rsp_dma_handle);
		list_del(&list_entry->list_node);
		kfree(list_entry);
	}

	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
}