/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
				       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
				     uint32_t);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -ENOMEM;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
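
/*
 * Illustrative sketch only (not part of the original driver): a typical
 * caller honors the locking contract documented above by taking hbalock
 * around lpfc_sli4_wq_put(). The wrapper name here is hypothetical;
 * lpfc_sli4_wq_put() and the queue/WQE types are the real ones used in
 * this file.
 */
#if 0
static int lpfc_example_wq_submit(struct lpfc_hba *phba, struct lpfc_queue *wq,
				  union lpfc_wqe *wqe)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = lpfc_sli4_wq_put(wq, wqe);	/* 0 on success, -ENOMEM if ring full */
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return rc;
}
#endif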
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
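
/*
 * Worked example (illustrative only, not driver code): how the circular
 * hba_index arithmetic above counts consumed entries. With entry_count = 4,
 * hba_index = 3 and index = 1, the loop advances 3 -> 0 -> 1 and reports
 * two entries released; the modulo handles the wrap at the end of the ring.
 */
#if 0
static uint32_t example_ring_distance(uint32_t hba_index, uint32_t index,
				      uint32_t entry_count)
{
	uint32_t released = 0;

	do {
		hba_index = (hba_index + 1) % entry_count;
		released++;
	} while (hba_index != index);
	return released;	/* example_ring_distance(3, 1, 4) == 2 */
}
#endif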
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the MQ Doorbell to signal the
 * HBA to start processing the Mailbox Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
	       (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
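
/*
 * Illustrative sketch (not part of the original driver): the canonical
 * consume-then-release pattern built from the two helpers above. The
 * function name and loop body are hypothetical; lpfc_sli4_eq_get(),
 * lpfc_sli4_eq_release() and LPFC_QUEUE_REARM are the real symbols used
 * elsewhere in this file.
 */
#if 0
static void lpfc_example_poll_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;

	while ((eqe = lpfc_sli4_eq_get(eq)) != NULL) {
		/* dispatch eqe to the appropriate CQ handler here */
	}
	/* clear valid bits for everything consumed and rearm the EQ */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
#endif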
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
	       (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The header Receive Queue Entry to put on the header queue.
 * @drqe: The data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entry.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on @hq then this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return put_index;
}
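
/*
 * Illustrative sketch (hypothetical helper, not driver code): posting one
 * header/data RQE pair with lpfc_sli4_rq_put(). The driver's real caller
 * is lpfc_sli_hbq_to_firmware_s4() further down in this file; this
 * stripped-down version only shows how the two lpfc_rqe structures are
 * filled from a buffer's DMA addresses.
 */
#if 0
static int lpfc_example_post_rq_buffer(struct lpfc_hba *phba,
				       dma_addr_t hdr_phys,
				       dma_addr_t dat_phys)
{
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(hdr_phys);
	hrqe.address_hi = putPaddrHigh(hdr_phys);
	drqe.address_lo = putPaddrLow(dat_phys);
	drqe.address_hi = putPaddrHigh(dat_phys);
	/* returns the put index on success, a negative errno on failure */
	return lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
				&hrqe, &drqe);
}
#endif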
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The targets DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 * If ndlp is NULL Remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Targets nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq activated for this xri
 *        < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
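
/*
 * Illustrative sketch (hypothetical flow, not driver code): the RRQ life
 * cycle implemented by the helpers above. lpfc_set_rrq_active() marks the
 * xri busy and queues an lpfc_node_rrq; lpfc_test_rrq_active() is what
 * __lpfc_sli_get_sglq() (below) uses to skip such xris; later,
 * lpfc_handle_rrq_active() expires or sends the RRQ and
 * lpfc_clr_rrq_active() clears the bitmap bit.
 */
#if 0
	/* after an exchange is aborted on xritag/rxid for node ndlp: */
	lpfc_set_rrq_active(phba, ndlp, xritag, rxid, 1);

	/* while the RRQ window is open, the xri must not be reused: */
	if (lpfc_test_rrq_active(phba, ndlp, xritag))
		; /* pick a different xri */
#endif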
/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&pring->ring_lock, iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_sgl_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
		       __func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
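
/*
 * Illustrative sketch (hypothetical caller, not driver code): how a ring
 * event handler can branch on the classification returned above. The irsp
 * pointer is assumed to be an IOCB_t from the response ring; CMD_IOCB_MASK
 * is the command mask defined in the lpfc headers.
 */
#if 0
	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK)) {
	case LPFC_SOL_IOCB:	/* match against txcmplq and complete */
		break;
	case LPFC_UNSOL_IOCB:	/* hand to the unsolicited event handler */
		break;
	case LPFC_ABORT_IOCB:	/* finish the abort bookkeeping */
		break;
	default:		/* LPFC_UNKNOWN_IOCB: log and drop */
		break;
	}
#endif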
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
	    (!(piocb->vport->load_flag & FC_UNLOADING))) {
		BUG_ON(!piocb->vport);
		mod_timer(&piocb->vport->els_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					     - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;


	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there is pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffer that are in-fly */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
					hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer, else it will return
 * an error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully post the buffer else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
				/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
				/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = rc;
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
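/*
 * Illustrative sketch of the HBQ buffer tag layout used above, inferred
 * from the "buffer_count | (hbqno << 16)" encode here and the
 * "tag >> 16" decode in lpfc_sli_hbqbuf_find()/lpfc_sli_free_hbq()
 * below: the HBQ number occupies the upper 16 bits and the per-HBQ
 * buffer index the lower 16 bits. Hypothetical helpers, for clarity only.
 */
static inline uint32_t
lpfc_example_hbq_tag_encode(uint32_t hbqno, uint32_t index)
{
	return (hbqno << 16) | (index & 0xffff);
}

static inline uint32_t
lpfc_example_hbq_tag_to_hbqno(uint32_t tag)
{
	return tag >> 16;
}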
/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count);
}
/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count);
}
/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list to remove from.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer;
 * otherwise it returns NULL. The hbalock is taken internally.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. This function gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}
/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by context1
 * of the mailbox.
 **/
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
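/*
 * Illustrative sketch (hypothetical helper) of the waiter side this
 * completion handler pairs with: the issuing thread parks a wait queue
 * head in context1, installs lpfc_sli_wake_mbox_wait as mbox_cmpl, and
 * sleeps until LPFC_MBX_WAKE is set. The real waiter is
 * lpfc_sli_issue_mbox_wait(); the error handling here is simplified.
 */
static inline int
lpfc_example_issue_mbox_and_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
				 unsigned long timeout_jiffies)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	pmboxq->context1 = &done_q;
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;

	if (lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
		return -EIO;

	/* Sleep until the completion handler sets LPFC_MBX_WAKE. */
	wait_event_interruptible_timeout(done_q,
					 pmboxq->mbox_flag & LPFC_MBX_WAKE,
					 timeout_jiffies);
	return (pmboxq->mbox_flag & LPFC_MBX_WAKE) ? 0 : -ETIMEDOUT;
}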
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue a UREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the node
	 * is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->vport = vport;
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
		!(phba->pport->load_flag & FC_UNLOADING) &&
		!pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts the
 * reference back.
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->context1;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
		     &phba->sli4_hba.sli_intf) ==
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
						 "0010 UNREG_LOGIN vpi:%x "
						 "rpi:%x DID:%x map:%x %p\n",
						 vport->vpi, ndlp->nlp_rpi,
						 ndlp->nlp_DID,
						 ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				lpfc_nlp_put(ndlp);
			}
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to the upper layers. The
 * interrupt service routine processes mailbox completion interrupts, adds
 * completed mailbox commands to the mboxq_cmpl queue, and signals the worker
 * thread. The worker thread calls lpfc_sli_handle_mb_event, which will return
 * the completed mailbox commands in the mboxq_cmpl queue to the upper layers.
 * This function returns the mailbox commands to the upper layer by calling
 * the completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			} else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if an unknown mbox command completes.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}
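/*
 * Illustrative sketch (hypothetical helper) of the splice-under-lock
 * pattern used above: the entire completion list is detached in a
 * single locked operation, and the potentially slow completion
 * handlers then run without holding the hbalock.
 */
static inline void
lpfc_example_drain_mbox_cmplq(struct lpfc_hba *phba)
{
	LIST_HEAD(cmplq);
	LPFC_MBOXQ_t *pmb;

	/* Detach everything on mboxq_cmpl under the lock... */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* ...then invoke each completion handler lock-free. */
	while (!list_empty(&cmplq)) {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb && pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}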
/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer is posted for a particular exchange and
 * the function will return the buffer without replacing the buffer.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}
/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		return 1;
	}
	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object, otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba, pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
			    saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. This function is called with the hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb, otherwise it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
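/*
 * Illustrative sketch (hypothetical helper) of the constant-time lookup
 * the routine above relies on: iotags are direct indices into the
 * phba->sli.iocbq_lookup array, so no list walk is required. The
 * txcmplq bookkeeping done by the real routine is omitted here.
 */
static inline struct lpfc_iocbq *
lpfc_example_iotag_to_iocbq(struct lpfc_hba *phba, uint16_t iotag)
{
	if (iotag == 0 || iotag > phba->sli.last_iotag)
		return NULL;	/* out-of-range tags have no command iocb */
	return phba->sli.iocbq_lookup[iotag];
}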
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag. This function is called with the
 * hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb, otherwise it returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			return cmd_iocb;
		}
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x is out of range: max iotag (x%x)\n",
			iotag, phba->sli.last_iotag);
	return NULL;
}
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with the command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			     (pring->ringno == LPFC_ELS_RING) &&
			     (cmdiocbp->iocb.ulpCommand ==
				CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
					cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
							LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
					"Data: x%x x%x x%x x%x\n",
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}

	return rc;
}
/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when the
 * put pointer is ahead of the get pointer for a ring. This function signals
 * an error attention condition to the worker thread and the worker
 * thread will transition the HBA to offline state.
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->sli.sli3.numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);

	return;
}
/**
 * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @ptr: Pointer to address of HBA context object.
 *
 * This function is invoked by the Error Attention polling timer when the
 * timer times out. It will check the SLI Error Attention register for
 * possible attention events. If so, it will post an Error Attention event
 * and wake up the worker thread to process it. Otherwise, it will set up the
 * Error Attention polling timer for the next poll.
 **/
void lpfc_poll_eratt(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;
	uint64_t sli_intr, cnt;

	phba = (struct lpfc_hba *)ptr;

	/* Here we will also keep track of interrupts per sec of the hba */
	sli_intr = phba->sli.slistat.sli_intr;

	if (phba->sli.slistat.sli_prev_intr > sli_intr)
		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
			sli_intr);
	else
		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);

	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
	do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
	phba->sli.slistat.sli_ips = cnt;

	phba->sli.slistat.sli_prev_intr = sli_intr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
	return;
}
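/*
 * Illustrative sketch (hypothetical helper) of the wrap-safe rate
 * computation above: do_div() is used because plain 64-bit division is
 * unavailable on 32-bit x86 kernels; it divides its first argument in
 * place and returns the remainder.
 */
static inline uint64_t
lpfc_example_intr_per_sec(uint64_t prev, uint64_t now, uint32_t interval_sec)
{
	uint64_t cnt;

	if (prev > now)		/* the 64-bit counter wrapped */
		cnt = ((uint64_t)(-1) - prev) + now;
	else
		cnt = now - prev;
	do_div(cnt, interval_sec);	/* cnt now holds the quotient */
	return cnt;
}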
/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the interrupt context when there is a ring
 * event for the fcp ring. The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
 * LE bit set. The function will call the completion handler of the command iocb
 * if the response iocb indicates a completion for a command iocb or it is
 * an abort completion. The function will call lpfc_sli_process_unsol_iocb
 * function if this is an unsolicited iocb.
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 **/
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries. If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;

	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure. The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port. No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if (unlikely(!cmdiocbq))
				break;
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->iocb_cmpl) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed. Update the ring
		 * pointer in SLIM. If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->sli.sli3.rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine. Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del_init(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine wraps the actual slow_ring event process routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a ring event
 * for non-fcp rings. The caller does not hold any lock. The function will
 * remove each response iocb in the response ring and call the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries. If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim. This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation. Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write. When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->sli.sli3.rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->sli.sli3.rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function will remove each
 * response iocb from the response worker queue and call the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			break;
		default:
			break;
		}
	}
}
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		spin_lock_irq(&pring->ring_lock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);

		spin_lock_irq(&phba->hbalock);
		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	} else {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
/**
 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
 * @phba: Pointer to HBA context object.
 *
 * This function aborts all iocbs in FCP rings and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t i;

	/* Look on all the FCP Rings for the iotag */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
			lpfc_sli_abort_iocb_ring(phba, pring);
		}
	} else {
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}
/**
 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all iocbs in the fcp ring and frees all the iocb
 * objects in txq and txcmplq. This function will not issue abort iocbs
 * for all the iocb commands in txcmplq, they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t i;

	spin_lock_irq(&phba->hbalock);
	/* Indicate the I/O queues are flushed */
	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	/* Look on all the FCP Rings for the iotag */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];

			spin_lock_irq(&pring->ring_lock);
			/* Retrieve everything on txq */
			list_splice_init(&pring->txq, &txq);
			/* Retrieve everything on the txcmplq */
			list_splice_init(&pring->txcmplq, &txcmplq);
			pring->txq_cnt = 0;
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&pring->ring_lock);

			/* Flush the txq */
			lpfc_sli_cancel_iocbs(phba, &txq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
			/* Flush the txcmpq */
			lpfc_sli_cancel_iocbs(phba, &txcmplq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
		}
	} else {
		pring = &psli->ring[psli->fcp_ring];

		spin_lock_irq(&phba->hbalock);
		/* Retrieve everything on txq */
		list_splice_init(&pring->txq, &txq);
		/* Retrieve everything on the txcmplq */
		list_splice_init(&pring->txcmplq, &txcmplq);
		pring->txq_cnt = 0;
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Flush the txq */
		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
		/* Flush the txcmpq */
		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
	}
}
/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares it
 * with the provided bit mask to check if the HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart, otherwise it
 * returns zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return 1;

	/*
	 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
				/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status)) {
			retval = 1;
			break;
		}
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
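/*
 * Illustrative sketch (hypothetical helper) of the retry cadence
 * described in the comment above: the poll loop sleeps briefly for the
 * early retries and backs off to progressively longer waits, with a
 * board restart partway through the schedule.
 */
static inline unsigned int
lpfc_example_brdready_delay_ms(int retry)
{
	if (retry <= 5)
		return 10;	/* early retries poll quickly */
	if (retry <= 10)
		return 500;
	return 2500;		/* late retries back off */
}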
/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function checks the host status register to check if the HBA is
 * ready. This function will wait in a loop for the HBA to become ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to become
 * ready, otherwise it returns zero.
 **/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = lpfc_sli4_post_status_check(phba);

	if (status) {
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		status = lpfc_sli4_post_status_check(phba);
	}

	/* Check to see if any errors occurred during init */
	if (status) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	} else
		phba->sli4_hba.intr_enable = 0;

	return retval;
}
/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
 * from the API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	return phba->lpfc_sli_brdready(phba, mask);
}
#define BARRIER_TEST_PATTERN (0xdeadbeef)

/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting an HBA. This function is called
 * with hbalock held and requests HBA to quiesce DMAs before a reset.
 **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy, ha_copy, resp_data;
	int i;
	uint8_t hdrtype;

	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	if (lpfc_readl(phba->HCregaddr, &hc_copy))
		return;
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return;
	if (ha_copy & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	for (i = 0; i < 50; i++) {
		if (lpfc_readl((resp_buf + 1), &resp_data))
			return;
		if (resp_data != ~(BARRIER_TEST_PATTERN))
			mdelay(1);
		else
			break;
	}
	resp_data = 0;
	if (lpfc_readl((resp_buf + 1), &resp_data))
		return;
	if (resp_data != ~(BARRIER_TEST_PATTERN)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	resp_data = 0;
	for (i = 0; i < 500; i++) {
		if (lpfc_readl(resp_buf, &resp_data))
			return;
		if (resp_data != mbox)
			mdelay(1);
		else
			break;
	}

clear_errat:

	while (++i < 500) {
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return;
		if (!(ha_copy & HA_ERATT))
			mdelay(1);
		else
			break;
	}

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
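/*
 * A minimal sketch of the handshake used above (hypothetical helper, not
 * driver code): write a known value, then poll a SLIM word until the chip
 * reports the expected reply or the poll budget is exhausted.
 */
#if 0
static int example_poll_iomem(uint32_t __iomem *addr, uint32_t want,
			      int tries)
{
	uint32_t val;

	while (tries--) {
		if (lpfc_readl(addr, &val))
			return -EIO;		/* register read failed */
		if (val == want)
			return 0;		/* handshake complete */
		mdelay(1);			/* give the chip time */
	}
	return -ETIMEDOUT;
}
#endif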
/**
 * lpfc_sli_brdkill - Issue a kill_board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * This function issues a kill_board mailbox command and waits for
 * the error attention interrupt. This function is called for stopping
 * the firmware processing. The caller is not required to hold any
 * locks. This function calls lpfc_hba_down_post function to free
 * any pending commands after the kill. The function will return 1 when it
 * fails to kill the board else will return 0.
 **/
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0329 Kill HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return 1;

	/* Disable the error attention */
	spin_lock_irq(&phba->hbalock);
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		mempool_free(pmb, phba->mbox_mem_pool);
		return 1;
	}
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2752 KILL_BOARD command failed retval %d\n",
				retval);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
	 * attention every 100ms for 3 seconds. If we don't get ERATT after
	 * 3 seconds we still set HBA_ERROR state because the status of the
	 * board is now undefined.
	 */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;
	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return 1;
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	return ha_copy & HA_ERATT ? 0 : 1;
}
/**
 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets the HBA by writing HC_INITFF to the control
 * register. After the HBA resets, this function resets all the iocb ring
 * indices. This function disables PCI layer parity checking during
 * the reset.
 * This function returns 0 always.
 * The caller is not required to hold any locks.
 **/
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag = 0;
		pring->sli.sli3.rspidx = 0;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
/**
 * lpfc_sli4_brdreset - Reset a sli-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets a SLI4 HBA. This function disables PCI layer parity
 * checking while resetting the device. The caller is not required to hold
 * any locks.
 *
 * This function returns 0 always.
 **/
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint16_t cfg_value;
	int rc = 0;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0295 Reset HBA Data: x%x x%x x%x\n",
			phba->pport->port_state, psli->sli_flag,
			phba->hba_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~(LPFC_PROCESS_LA);
	phba->fcf.fcf_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	/* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
	if (phba->hba_flag & HBA_FW_DUMP_OP) {
		phba->hba_flag &= ~HBA_FW_DUMP_OP;
		return rc;
	}

	/* Now physically reset the device */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0389 Performing PCI function reset!\n");

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	/* Perform FCoE PCI function reset before freeing queue memory */
	rc = lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	return rc;
}
/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to
 * restart the HBA. The caller is not required to hold any lock.
 * This function writes MBX_RESTART mailbox command to the SLIM and
 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
 * function to free any pending commands. The function enables
 * POST only during the first initialization. The function returns zero.
 * The function does not guarantee completion of MBX_RESTART mailbox
 * command before the return of this function.
 **/
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	volatile uint32_t word0;
	void __iomem *to_slim;
	uint32_t hba_aer_enabled;

	spin_lock_irq(&phba->hbalock);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0337 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (phba->pport->port_state)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Give the INITFF and Post time to settle. */
	mdelay(100);

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}
/**
 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * a SLI4 HBA. The caller is not required to hold any lock.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;
	int rc;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	rc = lpfc_sli4_brdreset(phba);

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return rc;
}
/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}
/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after a HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
 * iterations, the function will restart the HBA again. The function returns
 * zero if HBA successfully restarted else returns negative error code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries, for a total of
		 * ~60 seconds before resetting the board again and checking
		 * every 1 sec for 50 more retries. The up-to-60-second wait
		 * for board ready is required for the Falcon FIPS
		 * zeroization to complete; any board reset in between
		 * restarts zeroization and further delays board ready.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
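/*
 * Worst-case wait implied by the schedule above, assuming the restart at
 * iteration 150 and the give-up check at iteration 200 (a sketch of the
 * arithmetic, not driver code):
 *
 *   10 * 10ms  =   0.1s   (iterations   1..10)
 *   90 * 100ms =   9.0s   (iterations  11..100)
 *   50 * 1s    =  50.0s   (iterations 101..150, board restarted at 150)
 *   50 * 1s    =  50.0s   (iterations 151..200, then give up)
 *
 * so roughly 59 seconds before the restart and up to ~109 seconds total.
 */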
/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * This function calculates and returns the number of HBQs required to be
 * configured.
 **/
int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}

/**
 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
 *
 * This function adds the number of hbq entries in every HBQ to get
 * the total number of hbq entries required for the HBA and returns
 * the result.
 **/
static int
lpfc_sli_hbq_entry_count(void)
{
	int hbq_count = lpfc_sli_hbq_count();
	int count = 0;
	int i;

	for (i = 0; i < hbq_count; ++i)
		count += lpfc_hbq_defs[i]->entry_count;
	return count;
}

/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * This function calculates amount of memory required for all hbq entries
 * to be configured and returns the total memory required.
 **/
int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
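/*
 * Illustrative sketch (hypothetical caller, not driver code): the helpers
 * above let initialization code size one contiguous DMA region for every
 * HBQ entry before any individual HBQ is configured. The function name
 * below is an assumption used only for illustration.
 */
#if 0
static void *example_alloc_hbq_region(struct pci_dev *pdev, dma_addr_t *phys)
{
	/* One region large enough for all HBQ entries of all HBQs. */
	return dma_alloc_coherent(&pdev->dev, lpfc_sli_hbq_size(),
				  phys, GFP_KERNEL);
}
#endif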
/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx = 0;
		phba->hbqs[hbqno].local_hbqGetIdx = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
			hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}
/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 **/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
	phba->hbq_in_use = 1;
	phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
	phba->hbq_count = 1;
	/* Initially populate or replenish the HBQs */
	lpfc_sli_hbqbuf_init_hbqs(phba, 0);
	return 0;
}
/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by sli_mode
 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else returns negative error
 * code.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_BG_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;

			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3110 Port did not grant ASABT\n");
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;
		} else
			phba->max_vpi = 0;
		phba->fips_level = 0;
		phba->fips_spec_rev = 0;
		if (pmb->u.mb.un.varCfgPort.gdss) {
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2850 Security Crypto Active. FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->cfg_enable_bg) {
			if (pmb->u.mb.un.varCfgPort.gbg)
				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
			else
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
		}
	} else {
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
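/*
 * The function above repeatedly uses one locking idiom: every update of
 * sli_flag happens under hbalock with interrupts disabled. A minimal
 * sketch of that idiom as a helper (hypothetical, not driver code):
 */
#if 0
static void example_set_sli_flag(struct lpfc_hba *phba, uint32_t bits)
{
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= bits;	/* e.g. LPFC_SLI_MBOX_ACTIVE */
	spin_unlock_irq(&phba->hbalock);
}
#endif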
/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization function. This function
 * is called by the HBA initialization code, HBA reset code and HBA
 * error attention handler code. Caller is not required to hold any
 * locks. This function issues config_port mailbox command to configure
 * the SLI, setup iocb rings and HBQ rings. In the end the function
 * calls the config_port_post function to issue init_link mailbox
 * command and to start the discovery. The function will return zero
 * if successful, else it will return negative error code.
 **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int mode = 3, i;
	int longs;

	switch (lpfc_sli_mode) {
	case 2:
		if (phba->cfg_enable_npiv) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1824 NPIV enabled: Override lpfc_sli_mode "
				"parameter (%d) to auto (0).\n",
				lpfc_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1819 Unrecognized lpfc_sli_mode "
				"parameter: %d.\n", lpfc_sli_mode);

		break;
	}

	rc = lpfc_sli_config_port(phba, mode);

	if (rc && lpfc_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1820 Unable to select SLI-3. "
				"Not supported by adapter.\n");
	if (rc && mode != 2)
		rc = lpfc_sli_config_port(phba, 2);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2709 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2708 This device does not support "
					"Advanced Error Reporting (AER): %d\n",
					rc);
			phba->cfg_aer_support = 0;
		}
	}

	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Initialize VPIs. */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		/*
		 * The VPI bitmask and physical ID array are allocated
		 * and initialized once only - at driver load. A port
		 * reset doesn't need to reinitialize this memory.
		 */
		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
			phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
						  GFP_KERNEL);
			if (!phba->vpi_bmask) {
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}

			phba->vpi_ids = kzalloc(
					(phba->max_vpi+1) * sizeof(uint16_t),
					GFP_KERNEL);
			if (!phba->vpi_ids) {
				kfree(phba->vpi_bmask);
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}
			for (i = 0; i < phba->max_vpi; i++)
				phba->vpi_ids[i] = i;
		}
	}

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0445 Firmware initialization failed\n");
	return rc;
}
/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 * @mboxq: mailbox pointer.
 * This function issues a dump mailbox command to read config region
 * 23 and parses the records in the region to populate the driver's
 * internal data structures.
 **/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
		rc = -ENOMEM;
		goto out_free_mboxq;
	}

	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	rc = 0;

out_free_mboxq:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *	      On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command. In
 * addition, this routine gets the port vpd data.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		    uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Get a DMA buffer for the vpd data resulting from the READ_REV
	 * mailbox command.
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * The SLI4 implementation of READ_REV conflicts at word1,
	 * bits 31:16 and SLI4 adds vpd functionality not present
	 * in SLI3. This code corrects the conflicts.
	 */
	lpfc_read_rev(phba, mboxq);
	mqe = &mboxq->u.mqe;
	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
	mqe->un.read_rev.word1 &= 0x0000FFFF;
	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -EIO;
	}

	/*
	 * The available vpd length cannot be bigger than the
	 * DMA buffer passed to the port. Catch the less than
	 * case and update the caller's size.
	 */
	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
		*vpd_size = mqe->un.read_rev.avail_vpd_len;

	memcpy(vpd, dmabuf->virt, *vpd_size);

	dma_free_coherent(&phba->pcidev->dev, dma_size,
			  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return 0;
}
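/*
 * Illustrative sketch of a caller (hypothetical, not driver code): the
 * routine above expects the caller to own both the mailbox and the vpd
 * buffer, and to treat *vpd_size as an in/out parameter. The buffer size
 * below is an assumption for illustration only.
 */
#if 0
static int example_fetch_vpd(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t vpd_size = 1024;	/* assumed starting allocation */
	uint8_t *vpd;
	int rc;

	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd)
		return -ENOMEM;
	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	/* On success, vpd_size now holds the byte count actually returned. */
	kfree(vpd);
	return rc;
}
#endif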
/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves SLI4 device physical port name this PCI function
 * is attached to.
 *
 * Return codes
 *      0 - successful
 *      otherwise - failed to retrieve physical port name
 **/
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
	struct lpfc_controller_attribute *cntl_attr;
	struct lpfc_mbx_get_port_name *get_port_name;
	void *virtaddr = NULL;
	uint32_t alloclen, reqlen;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	char cport_name = 0;
	int rc;

	/* We assume nothing at this point */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* obtain link type and link number via READ_CONFIG */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	lpfc_sli4_read_config(phba);
	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
		goto retrieve_ppname;

	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3084 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, reqlen);
		rc = -ENOMEM;
		goto out_free_mboxq;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	virtaddr = mboxq->sge_array->addr[0];
	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
	shdr = &mbx_cntl_attr->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3085 Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	cntl_attr = &mbx_cntl_attr->cntl_attr;
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
	phba->sli4_hba.lnk_info.lnk_tp =
		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
	phba->sli4_hba.lnk_info.lnk_no =
		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3086 lnk_type:%d, lnk_numb:%d\n",
			phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

retrieve_ppname:
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
		LPFC_MBOX_OPCODE_GET_PORT_NAME,
		sizeof(struct lpfc_mbx_get_port_name) -
		sizeof(struct lpfc_sli4_cfg_mhdr),
		LPFC_SLI4_MBX_EMBED);
	get_port_name = &mboxq->u.mqe.un.get_port_name;
	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
		phba->sli4_hba.lnk_info.lnk_tp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3087 Mailbox x%x (x%x/x%x) failed: "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}
/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int fcp_eqidx;

	/* Arm the MBX, ELS and FCP completion queues */
	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
	fcp_eqidx = 0;
	if (phba->sli4_hba.fcp_cq) {
		do {
			lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
					     LPFC_QUEUE_REARM);
		} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
	}

	if (phba->cfg_fof)
		lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);

	if (phba->sli4_hba.hba_eq) {
		for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
		     fcp_eqidx++)
			lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
					     LPFC_QUEUE_REARM);
	}

	if (phba->cfg_fof)
		lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
}
/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful. Nonzero otherwise.
 **/
int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
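/*
 * Illustrative usage (hypothetical, not driver code): query the port for
 * each FCoE extent type and log what it would provision. The function name
 * and message text are assumptions for illustration.
 */
#if 0
static void example_dump_extents(struct lpfc_hba *phba)
{
	static const uint16_t types[] = {
		LPFC_RSC_TYPE_FCOE_RPI, LPFC_RSC_TYPE_FCOE_VPI,
		LPFC_RSC_TYPE_FCOE_XRI, LPFC_RSC_TYPE_FCOE_VFI,
	};
	uint16_t cnt, size;
	int i;

	for (i = 0; i < ARRAY_SIZE(types); i++)
		if (!lpfc_sli4_get_avail_extnt_rsrc(phba, types[i],
						    &cnt, &size))
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"extent type x%x: %d extents of "
					"%d elements\n", types[i], cnt, size);
}
#endif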
/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function reads the current available extents from the port and checks
 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   -Error: error indicates problem.
 *    1: Extent count or size has changed.
 *    0: No changes.
 **/
static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
{
	uint16_t curr_ext_cnt, rsrc_ext_cnt;
	uint16_t size_diff, rsrc_ext_size;
	int rc = 0;
	struct lpfc_rsrc_blks *rsrc_entry;
	struct list_head *rsrc_blk_list = NULL;

	size_diff = 0;
	curr_ext_cnt = 0;
	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_ext_cnt,
					    &rsrc_ext_size);
	if (unlikely(rc))
		return -EIO;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		break;
	}

	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
		curr_ext_cnt++;
		if (rsrc_entry->rsrc_size != rsrc_ext_size)
			size_diff++;
	}

	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
		rc = 1;

	return rc;
}
/**
 * lpfc_sli4_cfg_post_extnts -
 * @phba: Pointer to HBA context object.
 * @extnt_cnt - number of available extents.
 * @type - the extent type (rpi, xri, vfi, vpi).
 * @emb - buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 * @mbox - pointer to the caller's allocated mailbox structure.
 *
 * This function executes the extents allocation request. It also
 * takes care of the amount of memory needed to allocate or get the
 * allocated extents. It is the caller's responsibility to evaluate
 * the response.
 *
 * Returns:
 *   -Error: Error value describes the condition found.
 *   0: if successful
 **/
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
	int rc = 0;
	uint32_t req_len;
	uint32_t emb_len;
	uint32_t alloc_len, mbox_tmo;

	/* Calculate the total requested length of the dma memory */
	req_len = extnt_cnt * sizeof(uint16_t);

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
	 */
	*emb = LPFC_SLI4_MBX_EMBED;
	if (req_len > emb_len) {
		req_len = extnt_cnt * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		*emb = LPFC_SLI4_MBX_NEMBED;
	}

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
				     req_len, *emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2982 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
	if (unlikely(rc))
		return -EIO;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc))
		rc = -EIO;
	return rc;
}
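/*
 * A worked instance of the sizing rule above (sketch, not driver code; the
 * concrete byte counts are assumptions for illustration): with, say, a
 * 256-byte MAILBOX_t and a 20-byte mbox_header, the embedded payload
 * budget is 256 - 20 - 4 = 232 bytes, i.e. 116 uint16_t extent ids.
 * Requests above that budget fall back to the non-embedded (external SGE)
 * form, whose request length must additionally cover the cfg_shdr and the
 * extents-specific word.
 */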
/**
 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to allocate.
 *
 * This function allocates the number of elements for the specified
 * resource type.
 **/
static int
lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	bool emb = false;
	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
	uint16_t rsrc_id, rsrc_start, j, k;
	uint16_t *ids;
	int i, rc;
	unsigned long longs;
	unsigned long *bmask;
	struct lpfc_rsrc_blks *rsrc_blks;
	LPFC_MBOXQ_t *mbox;
	uint32_t length;
	struct lpfc_id_range *id_array = NULL;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	struct list_head *ext_blk_list;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_cnt,
					    &rsrc_size);
	if (unlikely(rc))
		return -EIO;

	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"3009 No available Resource Extents "
			"for resource type 0x%x: Count: 0x%x, "
			"Size 0x%x\n", type, rsrc_cnt,
			rsrc_size);
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
			"2903 Post resource extents type-0x%x: "
			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		id_array = &rsrc_ext->u.rsp.id[0];
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
		id_array = &n_rsrc->id;
	}

	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	rsrc_id_cnt = rsrc_cnt * rsrc_size;

	/*
	 * Based on the resource size and count, correct the base and max
	 * resource values.
	 */
	length = sizeof(struct lpfc_rsrc_blks);
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		phba->sli4_hba.rpi_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			kfree(phba->sli4_hba.rpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/*
		 * The next_rpi was initialized with the maximum available
		 * count but the port may allocate a smaller number. Catch
		 * that case and update the next_rpi.
		 */
		phba->sli4_hba.next_rpi = rsrc_id_cnt;

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.rpi_bmask;
		ids = phba->sli4_hba.rpi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		phba->vpi_bmask = kzalloc(longs *
					  sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->vpi_ids = kzalloc(rsrc_id_cnt *
					sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			kfree(phba->vpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->vpi_bmask;
		ids = phba->vpi_ids;
		ext_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		phba->sli4_hba.xri_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			kfree(phba->sli4_hba.xri_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.xri_bmask;
		ids = phba->sli4_hba.xri_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		phba->sli4_hba.vfi_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			kfree(phba->sli4_hba.vfi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.vfi_bmask;
		ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported Opcode. Fail call. */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		goto err_exit;
	}

	/*
	 * Complete initializing the extent configuration with the
	 * allocated ids assigned to this function. The bitmask serves
	 * as an index into the array and manages the available ids. The
	 * array just stores the ids communicated to the port via the wqes.
	 */
	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
		if ((i % 2) == 0)
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
					 &id_array[k]);
		else
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
					 &id_array[k]);

		rsrc_blks = kzalloc(length, GFP_KERNEL);
		if (unlikely(!rsrc_blks)) {
			rc = -ENOMEM;
			kfree(bmask);
			kfree(ids);
			goto err_exit;
		}
		rsrc_blks->rsrc_start = rsrc_id;
		rsrc_blks->rsrc_size = rsrc_size;
		list_add_tail(&rsrc_blks->list, ext_blk_list);
		rsrc_start = rsrc_id;
		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
			phba->sli4_hba.scsi_xri_start = rsrc_start +
				lpfc_sli4_get_els_iocb_cnt(phba);

		while (rsrc_id < (rsrc_start + rsrc_size)) {
			ids[j] = rsrc_id;
			rsrc_id++;
			j++;
		}
		/* Entire word processed. Get next word.*/
		if ((i % 2) == 1)
			k++;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
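/*
 * Illustrative sketch (hypothetical, not driver code): how a bitmask/ids
 * pair built above can hand out one identifier. The bitmask tracks which
 * slots are in use; the ids array maps a slot to the id the port actually
 * assigned. The helper name is an assumption for illustration.
 */
#if 0
static int example_alloc_id(unsigned long *bmask, uint16_t *ids, int max)
{
	int slot = find_first_zero_bit(bmask, max);

	if (slot >= max)
		return -1;		/* identifier space exhausted */
	set_bit(slot, bmask);		/* mark the slot in use */
	return ids[slot];		/* port-assigned identifier */
}
#endif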
/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type.
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range. It
 * is the caller's responsibility to release all kernel memory resources.
 **/
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends
	 * the resource type. All extents of this type are released by the
	 * port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				    &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				    &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Release kernel memory resources for the specific type. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
				    &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
				    &phba->sli4_hba.lpfc_xri_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
				    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/* RPI bitmask and physical id array are cleaned up earlier. */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
				    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function allocates all SLI4 resource identifiers.
 **/
int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
{
	int i, rc, error = 0;
	uint16_t count, base;
	unsigned long longs;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	if (phba->sli4_hba.extents_in_use) {
		/*
		 * The port supports resource extents. The XRI, VPI, VFI, RPI
		 * resource extent count must be read and allocated before
		 * provisioning the resource id arrays.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			/*
			 * Extent-based resources are set - the driver could
			 * be in a port reset. Figure out if any corrective
			 * actions need to be taken.
			 */
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			if (rc != 0)
				error++;

			/*
			 * It's possible that the number of resources
			 * provided to this port instance changed between
			 * resets. Detect this condition and reallocate
			 * resources. Otherwise, there is no action.
			 */
			if (error) {
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_INIT,
						"2931 Detected extent resource "
						"change. Reallocating all "
						"extents.\n");
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			} else
				return 0;
		}

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		if (unlikely(rc))
			goto err_exit;
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return rc;
	} else {
		/*
		 * The port does not support resource extents. The XRI, VPI,
		 * VFI, RPI resource ids were determined from READ_CONFIG.
		 * Just allocate the bitmasks and provision the resource id
		 * arrays. If a port reset is active, the resources don't
		 * need any action - just exit.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			lpfc_sli4_dealloc_resource_identifiers(phba);
			lpfc_sli4_remove_rpis(phba);
		}
		/* RPIs. */
		count = phba->sli4_hba.max_cfg_param.max_rpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3279 Invalid provisioning of "
					"rpi:%d\n", count);
			rc = -EINVAL;
			goto err_exit;
		}
		base = phba->sli4_hba.max_cfg_param.rpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.rpi_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kzalloc(count *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			rc = -ENOMEM;
			goto free_rpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.rpi_ids[i] = base + i;

		/* VPIs. */
		count = phba->sli4_hba.max_cfg_param.max_vpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3280 Invalid provisioning of "
					"vpi:%d\n", count);
			rc = -EINVAL;
			goto free_rpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->vpi_bmask = kzalloc(longs *
					  sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto free_rpi_ids;
		}
		phba->vpi_ids = kzalloc(count *
					sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			rc = -ENOMEM;
			goto free_vpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->vpi_ids[i] = base + i;

		/* XRIs. */
		count = phba->sli4_hba.max_cfg_param.max_xri;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3281 Invalid provisioning of "
					"xri:%d\n", count);
			rc = -EINVAL;
			goto free_vpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.xri_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.xri_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto free_vpi_ids;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kzalloc(count *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			rc = -ENOMEM;
			goto free_xri_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.xri_ids[i] = base + i;

		/* VFIs. */
		count = phba->sli4_hba.max_cfg_param.max_vfi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3282 Invalid provisioning of "
					"vfi:%d\n", count);
			rc = -EINVAL;
			goto free_xri_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vfi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.vfi_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto free_xri_ids;
		}
		phba->sli4_hba.vfi_ids = kzalloc(count *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			rc = -ENOMEM;
			goto free_vfi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.vfi_ids[i] = base + i;

		/*
		 * Mark all resources ready. An HBA reset doesn't need
		 * to reset the initialization.
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
 free_vpi_ids:
	kfree(phba->vpi_ids);
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
 err_exit:
	return rc;
}
/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function deallocates all previously allocated SLI4 resource
 * identifiers.
 **/
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}

/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold port extent count response
 * @extnt_size: buffer to hold port extent size response.
 *
 * This function calls the port to read the host allocated extents
 * for a particular type.
 **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type. */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count.  The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox.  The uint32_t
	 * accounts for the extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		  sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
	 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = curr_blks * sizeof(uint16_t);
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			  sizeof(union lpfc_sli4_cfg_shdr) +
			  sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2983 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located.  Then get local pointers
	 * to the response data.  The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"2984 Failed to read allocated resources "
			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
			type,
			bf_get(lpfc_mbox_hdr_status, &shdr->response),
			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}

/**
 * lpfc_sli4_repost_els_sgl_list - Repost the els buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of els buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of els buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For single els
 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
 * mailbox command for posting.
 *
 * Returns: 0 = success, non-zero failure.
 **/
static int
lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = phba->sli4_hba.els_xri_cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* keep track of last sgl's xritag */
		last_xritag = sglq_entry->sli4_xritag;

		/* end of repost sgl list condition for els buffers */
		if (num_posted == phba->sli4_hba.els_xri_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* successful, put sgl to posted list */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure, put sgl to free list */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post els "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post the els buffer list sgls as a block */
		status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
						     post_cnt);

		if (!status) {
			/* success, put sgl list to posted sgl list */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure, put sgl list to free sgl list */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post els sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset els sgl post count for next round of posting */
		post_cnt = 0;
	}
	/* update the number of XRIs posted for ELS */
	phba->sli4_hba.els_xri_cnt = total_cnt;

	/* free the els sgls failed to post */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* push els sgls posted to the available list */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&pring->ring_lock);
		list_splice_init(&post_sgl_list,
				 &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock(&pring->ring_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3161 Failure to post els sgl to port.\n");
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;

	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readiness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;

	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
		phba->hba_flag |= HBA_FCOE_MODE;
	else
		phba->hba_flag &= ~HBA_FCOE_MODE;

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions; only read parameters if the
	 * board is FCoE.
	 */
	if (phba->hba_flag & HBA_FCOE_MODE &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/*
	 * Retrieve sli4 device physical port name; failure to do so
	 * is considered non-fatal.
	 */
	rc = lpfc_sli4_retrieve_pport_name(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"3080 Successful retrieving SLI4 device "
				"physical port name: %s.\n", phba->Port);

	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					&mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3362 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, rc);
		phba->pport->cfg_lun_queue_depth = rc;
	}

	/*
	 * Discover the port's supported feature set and match it against the
	 * host's requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}
	if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
		phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver.  This is not a fatal error.
	 */
	phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
	if (phba->cfg_enable_bg) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
		else
			ftr_rsp++;
	}

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
	 * calls depend on these resources to complete port setup.
	 */
	rc = lpfc_sli4_alloc_resource_identifiers(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2920 Failed to alloc Resource IDs "
				"rc = x%x\n", rc);
		goto out_free_mbox;
	}

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->context1 = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* update host els and scsi xri-sgl sizes and mappings */
	rc = lpfc_sli4_xri_sgl_update(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"1400 Failed to update xri-sgl size and "
				"mapping: %d\n", rc);
		goto out_free_mbox;
	}

	/* register the els sgl pool to the port */
	rc = lpfc_sli4_repost_els_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during els sgl post "
				"operation\n", rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* register the allocated scsi sgl pool to the port */
	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0383 Error %d during scsi sgl post "
				"operation\n", rc);
		/* Some Scsi buffers were moved to the abort scsi list */
		/* A pci function reset will repost them */
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}
	lpfc_sli4_node_prep(phba);

	/* Create all the SLI4 queues */
	rc = lpfc_sli4_queue_create(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3089 Failed to allocate queues\n");
		rc = -ENODEV;
		goto out_stop_timers;
	}
	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n", rc);
		goto out_destroy_queue;
	}

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
		rc = 0;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		/*
		 * The FC Port needs to register FCFI (index 0)
		 */
		lpfc_reg_fcfi(phba, mboxq);
		mboxq->vport = phba->pport;
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			goto out_unset_queue;
		rc = 0;
		phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
					&mboxq->u.mqe.un.reg_fcfi);

		/* Check if the port is configured to be disabled */
		lpfc_sli_read_link_ste(phba);
	}

	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->hba_flag & LINK_DISABLED)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
				"3103 Adapter Link is disabled.\n");
		lpfc_down_link(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
					"3104 Adapter failed to issue "
					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
			goto out_unset_queue;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* don't perform init_link on SLI4 FC port loopback test */
		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
			if (rc)
				goto out_unset_queue;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	lpfc_sli4_queue_unset(phba);
out_destroy_queue:
	lpfc_sli4_queue_destroy(phba);
out_stop_timers:
	lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}

/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @ptr: context object - pointer to hba structure.
 *
 * This is the callback function for the mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox completes. The function is called by
 * the kernel timer code when a mailbox does not complete within
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
 *                                    are pending
 * @phba: Pointer to HBA context object.
 *
 * This function checks if any mailbox completions are present on the mailbox
 * completion queue.
 **/
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{

	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on mailbox completion queue */

	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		if (mcq->hba_index == idx)
			break;
	}
	return pending_completions;
}

/**
 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
 *                                           that were missed.
 * @phba: Pointer to HBA context object.
 *
 * For sli4, it is possible to miss an interrupt. As such, mbox completions
 * may be missed, causing erroneous mailbox timeouts to occur. This function
 * checks to see if mbox completions are on the mailbox completion queue
 * and will process all the completions associated with the eq for the
 * mailbox completion queue.
 **/
static bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{

	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_eqe *eqe;
	bool mbox_pending;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the eq associated with the mcq */

	if (phba->sli4_hba.hba_eq)
		for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
			if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
			    phba->sli4_hba.mbx_cq->assoc_qid) {
				fpeq = phba->sli4_hba.hba_eq[eqidx];
				break;
			}
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ */

	lpfc_sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending */

	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on EQ
	 * associated with the mbox completion queue (this could include
	 * mailbox commands, async events, els commands, receive queue data
	 * and fcp commands)
	 */

	if (mbox_pending)
		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
			fpeq->EQ_processed++;
		}

	/* Always clear and re-arm the EQ */

	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	return mbox_pending;
}

/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from worker thread when a mailbox command times out.
 * The caller is not required to hold any locks. This function will reset the
 * HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;
	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed, process the completion and return */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}

/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and return immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
 * The sli layer owns the mailbox object until the completion of mailbox
 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
		    !(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2528 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand, phba->pport->port_state,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000);
		mod_timer(&psli->mbox_tmo, jiffies + timeout);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand, phba->pport->port_state,
			psli->sli_flag, flag);

	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send:       cmd:x%x mb:x%x x%x",
				(uint32_t)mbx->mbxCommand,
				mbx->un.varWords[0], mbx->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_sli_pcimem_bcopy(pmbox->context2,
				(uint8_t *)phba->mbox_ext,
				pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_memcpy_to_slim(phba->MBslimaddr +
				MAILBOX_HBA_EXT_OFFSET,
				pmbox->context2, pmbox->in_ext_byte_len);
		}
		if (mbx->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
					      MAILBOX_CMD_SIZE);
		}

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof(uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof(uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
		}
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
					       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) &slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->context2,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_memcpy_from_slim(pmbox->context2,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}

/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out the
 * possible outstanding mailbox command before return.
 *
 * Returns:
 *	0 - the outstanding mailbox command completed; otherwise, the wait for
 *	the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Make sure the mailbox is really active */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */
			rc = 1;
			break;
		}
	}

	/* Cannot cleanly block the async mailbox command; fail it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}

/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is an outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Outstanding synchronous mailbox command is guaranteed to be done,
	 * successful or timeout. After a timeout the outstanding mailbox
	 * command is always removed, so just unblock posting async mailbox
	 * commands and resume.
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function waits for the bootstrap mailbox register ready bit from
 * the port for twice the regular mailbox command timeout value.
 *
 *	0 - no timeout on waiting for bootstrap mailbox register ready.
 *	MBXERR_ERROR - wait for bootstrap mailbox register timed out.
 **/
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t db_ready;
	unsigned long timeout;
	struct lpfc_register bmbx_reg;

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
				   * 1000) + jiffies;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			msleep(2);

		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
	} while (!db_ready);

	return 0;
}

/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port.  The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 *	MBX_SUCCESS - mailbox posted successfully
 *	Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for bootstrap mbox register for readiness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post.  Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			      sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			      sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			      sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token, no lock needed for the release */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}

/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with SLI-4 interface spec.
 *
 * Return codes the caller owns the mailbox command after the return of the
 * function.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command if setup */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"(%d):2597 Sync Mailbox command "
					"x%x (x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}

/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by the worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}

/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes the caller owns the mailbox command after the return of the
 * function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}

/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held to add a command
 * iocb to the txq when the SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
}

/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. This function checks
 * txq to flush the iocbs in txq to Firmware before
 * submitting new iocbs to the Firmware.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL. Caller needs to check
 * *piocb to find if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}

/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if the HBA is resetting or if the
 * LPFC_STOP_IOCB_EVENT flag is turned on, the function returns IOCB_ERROR.
 * When the link is down, this function allows only iocbs for posting buffers.
 * This function finds the next available slot in the command ring, posts the
 * command to that slot and writes the port attention register to request that
 * the HBA start processing the new iocb. If there is no slot available in the
 * ring and the caller did not set SLI_IOCB_RET_IOCB, the new iocb is queued
 * on the txq; otherwise the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}


	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_RCTL_DD_UNSOL_CMD) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}

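/*
 * How a caller of the _s3 issue path sees the SLI_IOCB_RET_IOCB flag, as a
 * compiled-out sketch: with the flag clear, a busy ring parks the iocb on
 * the txq and IOCB_SUCCESS is returned; with the flag set, the iocb comes
 * back with IOCB_BUSY and stays owned by the caller. Whether a caller
 * releases a rejected iocb this way depends on context and is an assumption
 * here.
 */
#if 0
	int rc;

	/* flag 0 = let the SLI layer park busy iocbs on the txq */
	rc = __lpfc_sli_issue_iocb_s3(phba, LPFC_ELS_RING, piocb, 0);
	if (rc == IOCB_ERROR)
		lpfc_sli_release_iocbq(phba, piocb);
	/* IOCB_SUCCESS: submitted to the ring, or queued for later */
#endif
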
/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the IOCB contains a single
 * BDE then it is converted to a single sli_sge.
 * The IOCB is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl  = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl  = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl  = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}

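/*
 * The word2 handling above follows a convert-modify-convert discipline:
 * bring the little-endian SGE word to CPU order, update its bit fields,
 * then convert back before the hardware sees it. The same round trip on a
 * plain word, compiled out (sgl_word2_le and MY_LAST_SGE_BIT are
 * hypothetical names for illustration):
 */
#if 0
	uint32_t word2;

	word2 = le32_to_cpu(sgl_word2_le);	/* LE -> CPU order */
	word2 |= MY_LAST_SGE_BIT;		/* modify in CPU order */
	sgl_word2_le = cpu_to_le32(word2);	/* CPU -> LE for the HBA */
#endif
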
/**
 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry.
 *
 * This routine converts the iocb command to its Work Queue Entry
 * equivalent. The wqe pointer should not have any fields set when
 * this routine is called because it will memcpy over them.
 * This routine does not set the CQ_ID or the WQEC bits in the
 * wqe.
 *
 * Returns: 0 = Success, IOCB_ERROR = Failure.
 **/
static int
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		   union lpfc_wqe *wqe)
{
	uint32_t xmit_len = 0, total_len = 0;
	uint8_t ct = 0;
	uint32_t fip;
	uint32_t abort_tag;
	uint8_t command_type = ELS_COMMAND_NON_FIP;
	uint8_t cmnd;
	uint16_t xritag;
	uint16_t abrt_iotag;
	struct lpfc_iocbq *abrtiocbq;
	struct ulp_bde64 *bpl = NULL;
	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
	int numBdes, i;
	struct ulp_bde64 bde;
	struct lpfc_nodelist *ndlp;
	uint32_t *pcmd;
	uint32_t if_type;

	fip = phba->hba_flag & HBA_FIP_SUPPORT;
	/* The fcp commands will set command type */
	if (iocbq->iocb_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	/* Some of the fields are in the right position already */
	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
	abort_tag = (uint32_t) iocbq->iotag;
	xritag = iocbq->sli4_xritag;
	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
	wqe->generic.wqe_com.word10 = 0;
	/* words0-2 bpl convert bde */
	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		bpl  = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)iocbq->context3)->virt;
		if (!bpl)
			return IOCB_ERROR;

		/* Should already be byte swapped. */
		wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
		wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
		/* swap the size field back to the cpu so we
		 * can assign it to the sgl.
		 */
		wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
		xmit_len = wqe->generic.bde.tus.f.bdeSize;
		total_len = 0;
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			total_len += bde.tus.f.bdeSize;
		}
	} else
		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;

	iocbq->iocb.ulpIoTag = iocbq->iotag;
	cmnd = iocbq->iocb.ulpCommand;

	switch (iocbq->iocb.ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
			ndlp = iocbq->context_un.ndlp;
		else
			ndlp = (struct lpfc_nodelist *)iocbq->context1;
		if (!iocbq->iocb.ulpLe) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2007 Only Limited Edition cmd Format"
				" supported 0x%x\n",
				iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}

		wqe->els_req.payload_len = xmit_len;
		/* Els_request64 has a TMO */
		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
			iocbq->iocb.ulpTimeout);
		/* Need a VF for word 4, set the vf bit */
		bf_set(els_req64_vf, &wqe->els_req, 0);
		/* And a VFID for word 12 */
		bf_set(els_req64_vfid, &wqe->els_req, 0);
		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
		/* CCP CCPE PV PRI in word10 were set in the memcpy */
		if (command_type == ELS_COMMAND_FIP)
			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
					>> LPFC_FIP_ELS_ID_SHIFT);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if_type = bf_get(lpfc_sli_intf_if_type,
					&phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				*pcmd == ELS_CMD_SCR ||
				*pcmd == ELS_CMD_FDISC ||
				*pcmd == ELS_CMD_LOGO ||
				*pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
					iocbq->vport->fc_myDID);
				if ((*pcmd == ELS_CMD_FLOGI) &&
					!(phba->fc_topology ==
						LPFC_TOPOLOGY_LOOP))
					bf_set(els_req64_sid, &wqe->els_req, 0);
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
					phba->vpi_ids[iocbq->vport->vpi]);
			} else if (pcmd && iocbq->context1) {
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}
		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		wqe->els_req.max_response_payload_len = total_len - xmit_len;
		break;
	case CMD_XMIT_SEQUENCE64_CX:
		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.un.ulpWord[3]);
		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		/* The entire sequence is transmitted for this IOCB */
		xmit_len = total_len;
		cmnd = CMD_XMIT_SEQUENCE64_CR;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
	case CMD_XMIT_SEQUENCE64_CR:
		/* word3 iocb=io_tag32 wqe=reserved */
		wqe->xmit_sequence.rsvd3 = 0;
		/* word4 relative_offset memcpy */
		/* word5 r_ctl/df_ctl memcpy */
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		wqe->xmit_sequence.xmit_len = xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BCAST64_CN:
		/* word3 iocb=iotag32 wqe=seq_payload_len */
		wqe->xmit_bcast64.seq_payload_len = xmit_len;
		/* word4 iocb=rsvd wqe=rsvd */
		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
		break;
	case CMD_FCP_IWRITE64_CR:
		command_type = FCP_COMMAND_DATA_OUT;
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iwrite,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
			if (phba->cfg_XLanePriority) {
				bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		break;
	case CMD_FCP_IREAD64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iread,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iread,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
			if (phba->cfg_XLanePriority) {
				bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		break;
	case CMD_FCP_ICMND64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_icmd,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_icmd,
		       0);
		/* word3 iocb=IO_TAG wqe=reserved */
		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
			if (phba->cfg_XLanePriority) {
				bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		break;
	case CMD_GEN_REQUEST64_CR:
		/* For this command calculate the xmit length of the
		 * request bde.
		 */
		xmit_len = 0;
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
			sizeof(struct ulp_bde64);
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
				break;
			xmit_len += bde.tus.f.bdeSize;
		}
		/* word3 iocb=IO_TAG wqe=request_payload_len */
		wqe->gen_req.request_payload_len = xmit_len;
		/* word4 iocb=parameter wqe=relative_offset memcpy */
		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
		/* word6 context tag copied in memcpy */
		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2015 Invalid CT %x command 0x%x\n",
				ct, iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_ELS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* words0-2 BDE memcpy */
		/* word3 iocb=iotag32 wqe=response_payload_len */
		wqe->xmit_els_rsp.response_payload_len = xmit_len;
		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;
		/* word5 iocb=rsvd wqe=did */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
		       iocbq->iocb.un.xseq64.xmit_els_remoteID);

		if_type = bf_get(lpfc_sli_intf_if_type,
					&phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			if (iocbq->vport->fc_flag & FC_PT2PT) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
					iocbq->vport->fc_myDID);
				if (iocbq->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
						&wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}
		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
			       phba->vpi_ids[iocbq->vport->vpi]);
		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				iocbq->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				phba->vpi_ids[phba->pport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_CLOSE_XRI_CN:
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
		/* words 0-2 memcpy should be 0 reserved */
		/* port will send abts */
		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
		} else
			fip = 0;

		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
			/*
			 * The link is down, or the command was ELS_FIP
			 * so the fw does not need to send abts
			 * on the wire.
			 */
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
		else
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
		wqe->abort_cmd.rsrvd5 = 0;
		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
		/*
		 * The abort handler will send us CMD_ABORT_XRI_CN or
		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
		 */
		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		cmnd = CMD_ABORT_XRI_CX;
		command_type = OTHER_COMMAND;
		xritag = 0;
		break;
	case CMD_XMIT_BLS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* As BLS ABTS RSP WQE is very different from other WQEs,
		 * we re-construct this WQE here based on information in
		 * iocbq from scratch.
		 */
		memset(wqe, 0, sizeof(union lpfc_wqe));
		/* OX_ID is invariant regardless of who sent the ABTS to the
		 * CT exchange.
		 */
		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
		    LPFC_ABTS_UNSOL_INT) {
			/* ABTS sent by initiator to CT exchange, the
			 * RX_ID field will be filled with the newly
			 * allocated responder XRI.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       iocbq->sli4_xritag);
		} else {
			/* ABTS sent by responder to CT exchange, the
			 * RX_ID field will be filled with the responder
			 * RX_ID from ABTS.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
		}
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);

		/* Use CT=VPI */
		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
			ndlp->nlp_DID);
		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
			iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
			phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
		}

		break;
	case CMD_XRI_ABORTED_CX:
	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2014 Invalid command 0x%x\n",
				iocbq->iocb.ulpCommand);
		return IOCB_ERROR;
	}

	if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
	iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
			      LPFC_IO_DIF_INSERT);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	return 0;
}

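/*
 * Nearly every statement above funnels through bf_set(), the driver's
 * accessor for named bit fields inside WQE words. A compiled-out sketch of
 * the shift-and-mask update such an accessor performs; the field geometry
 * here is invented for illustration and is not taken from the WQE layout:
 */
#if 0
#define MY_FIELD_SHIFT	4
#define MY_FIELD_MASK	0x0000000f	/* 4-bit field at bits 4..7 */

static inline void my_bf_set(uint32_t *word, uint32_t value)
{
	*word = (*word & ~(MY_FIELD_MASK << MY_FIELD_SHIFT)) |
		((value & MY_FIELD_MASK) << MY_FIELD_SHIFT);
}
#endif
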
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_sglq(phba, piocb);
				if (!sglq) {
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP) {
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	} else {
		/*
		 * This is a continuation of a command (CX), so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
			wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
		} else {
			wq = phba->sli4_hba.oas_wq;
		}
		if (lpfc_sli4_wq_put(wq, &wqe))
			return IOCB_ERROR;
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return IOCB_ERROR;
		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			return IOCB_ERROR;
	}
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}

/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version for issuing IOCB through the
 * function pointer from the lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		      struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}

/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
	return 0;
}

/**
 * lpfc_sli_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @ring_number: Initial ring
 * @piocb: Pointer to command iocb.
 *
 * For SLI4, FCP IO can be deferred to one of many WQs, based on
 * fcp_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's fcp_wqidx.
 **/
int
lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
		   struct lpfc_iocbq *piocb)
{
	struct lpfc_scsi_buf *lpfc_cmd;

	if (phba->sli_rev < LPFC_SLI_REV4)
		return ring_number;

	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (!(phba->cfg_fof) ||
		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
			if (unlikely(!phba->sli4_hba.fcp_wq))
				return LPFC_HBA_ERROR;
			/*
			 * for abort iocb fcp_wqidx should already
			 * be setup based on what work queue we used.
			 */
			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
				lpfc_cmd = (struct lpfc_scsi_buf *)
						piocb->context1;
				piocb->fcp_wqidx =
					lpfc_sli4_scmd_to_wqidx_distr(phba,
								      lpfc_cmd);
			}
			ring_number = MAX_SLI3_CONFIGURED_RINGS +
				piocb->fcp_wqidx;
		} else {
			if (unlikely(!phba->sli4_hba.oas_wq))
				return LPFC_HBA_ERROR;
			piocb->fcp_wqidx = 0;
			ring_number = LPFC_FCP_OAS_RING;
		}
	}
	return ring_number;
}

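/*
 * Worked example of the ring math above: assuming MAX_SLI3_CONFIGURED_RINGS
 * is 4, an FCP command distributed to fcp_wqidx 2 is issued on ring
 * 4 + 2 = 6, and a later abort for that command reuses the saved fcp_wqidx
 * so it lands on the same ring, and therefore the same work queue.
 */
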
/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
 * function. It acquires the appropriate lock (the per-ring ring_lock on
 * SLI-4, the hbalock on SLI-2/3), calls __lpfc_sli_issue_iocb and returns
 * the error returned by __lpfc_sli_issue_iocb. This wrapper is used by
 * functions which do not hold those locks.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflags;
	int rc, idx;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
		if (unlikely(ring_number == LPFC_HBA_ERROR))
			return IOCB_ERROR;
		idx = piocb->fcp_wqidx;

		pring = &phba->sli.ring[ring_number];
		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
			fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];

			if (atomic_dec_and_test(&fcp_eq_hdl->
				fcp_eq_in_use)) {

				/* Get associated EQ with this index */
				fpeq = phba->sli4_hba.hba_eq[idx];

				/* Turn off interrupts from this EQ */
				lpfc_sli4_eq_clr_intr(fpeq);

				/*
				 * Process all the events on FCP EQ
				 */
				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
					lpfc_sli4_hba_handle_eqe(phba,
						eqe, idx);
					fpeq->EQ_processed++;
				}

				/* Always clear and re-arm the EQ */
				lpfc_sli4_eq_release(fpeq,
					LPFC_QUEUE_REARM);
			}
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
		}
	} else {
		/* For now, SLI2/3 will still use hbalock */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}

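/*
 * Note the split locking model above: on SLI-4 each ring has its own
 * ring_lock, so different work queues can issue in parallel, while SLI-2/3
 * still serializes through the single hbalock. Either way the lock is taken
 * inside this wrapper, which is why it must only be called by code that
 * does not already hold it.
 */
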
/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode functionality
 * or IP over FC functionality.
 *
 * This function is called with no lock held.
 **/
static int
lpfc_extra_ring_setup( struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->ring[psli->fcp_ring];
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->ring[psli->extra_ring];

	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}

/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port. The port generates
 * this event when an Abort Sequence request to an rport fails
 * twice in succession. The abort could be originated by the
 * driver or by the port. The ABTS could have been for an ELS
 * or FCP IO. The port only generates this event when an ABTS
 * fails to complete after one retry.
 */
static void
lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
			  struct lpfc_iocbq *iocbq)
{
	struct lpfc_nodelist *ndlp = NULL;
	uint16_t rpi = 0, vpi = 0;
	struct lpfc_vport *vport = NULL;

	/* The rpi in the ulpContext is vport-sensitive. */
	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
	rpi = iocbq->iocb.ulpContext;

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3092 Port generated ABTS async event "
			"on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);

	vport = lpfc_find_vport_by_vpid(phba, vpi);
	if (!vport)
		goto err_exit;
	ndlp = lpfc_findnode_rpi(vport, rpi);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto err_exit;

	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
		lpfc_sli_abts_recover_port(vport, ndlp);
	return;

 err_exit:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3095 Event Context not found, no "
			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
			iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
			vpi, rpi);
}

/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
 * port. The port generates this event when an abort exchange request to an
 * rport fails twice in succession with no reply. The abort could be originated
 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
 */
void
lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
			   struct lpfc_nodelist *ndlp,
			   struct sli4_wcqe_xri_aborted *axri)
{
	struct lpfc_vport *vport;
	uint32_t ext_status = 0;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3115 Node Context not found, driver "
				"ignoring abts err event\n");
		return;
	}

	vport = ndlp->vport;
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3116 Port generated FCP XRI ABORT event on "
			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
			bf_get(lpfc_wcqe_xa_xri, axri),
			bf_get(lpfc_wcqe_xa_status, axri),
			axri->parameter);

	/*
	 * Catch the ABTS protocol failure case. Older OCe FW releases returned
	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
	 */
	ext_status = axri->parameter & IOERR_PARAM_MASK;
	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
		lpfc_sli_abts_recover_port(vport, ndlp);
}

/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
		break;
	}
}

/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled, so there is no need for locking.
 *
 * This function always returns 0.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	if (phba->sli_rev == LPFC_SLI_REV4)
		psli->num_rings += phba->cfg_fcp_io_channel;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->extra_ring = LPFC_EXTRA_RING;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
				pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli_queue_setup - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held and always returns
 * 1.
 **/
int
lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->sli.sli3.next_cmdidx  = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->flag = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
		spin_lock_init(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
	return 1;
}

/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: the pending mailbox command queue; the outstanding
 * mailbox command; and the completed mailbox command queue. It is the caller's
 * responsibility to make sure that the driver is in the proper state to flush
 * the mailbox command sub-system. Namely, the posting of mailbox commands into
 * the pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state that it will never work on the outstanding
 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
 * mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);
	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}

/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		prev_pring_flag = pring->flag;
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}
		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			if (iocb->vport != vport)
				continue;
			list_move_tail(&iocb->list, &completions);
		}

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
									list) {
			if (iocb->vport != vport)
				continue;
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}

		pring->flag = prev_pring_flag;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}

/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocb, buffers, mailbox commands
 * while shutting down the HBA. This function is called with no
 * lock held and always returns 1.
 * This function does the following to cleanup driver resources:
 * - Free discovery resources for each virtual port
 * - Cleanup any pending fabric iocbs
 * - Iterate through the iocb txq and free each entry
 *   in the list.
 * - Free up any buffer posted to the HBA
 * - Free mailbox commands in the mailbox queue.
 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_splice_init(&pring->txq, &completions);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
			struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}

/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

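/*
 * The copy above is deliberately word-by-word so each 32-bit quantity can
 * be byte-swapped on its own; @cnt is a byte count, which is why the loop
 * advances by sizeof(uint32_t). A compiled-out usage sketch with
 * hypothetical buffer names:
 */
#if 0
	uint32_t slim_image[8];		/* little-endian SLI memory image */
	uint32_t host_copy[8];		/* CPU-order destination */

	lpfc_sli_pcimem_bcopy(slim_image, host_copy, sizeof(slim_image));
#endif
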
/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *
 * This function is used for copying data between a data structure
 * with big endian representation to local endianness.
 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}

/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffer posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
	 * from a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}

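/*
 * Usage sketch (illustrative, hypothetical names): the tag allocated here
 * is stored in the buffer before it is posted with CMD_QUE_XRI64_CX, so
 * the completion path can find the buffer again:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *	... post the buffer with a CMD_QUE_XRI64_CX iocb ...
 *	mp = lpfc_sli_ring_taggedbuf_get(phba, pring, tag_from_ret_iocb);
 */
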
/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found then lpfc_dmabuf object of the
 * buffer is returned to the caller else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%p x%p x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}

/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found else it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}

/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb = NULL;

	if (irsp->ulpStatus) {

		/*
		 * Assume that the port already completed and returned, or
		 * will return the iocb. Just Log the message.
		 */
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (abort_iotag != 0 &&
				abort_iotag <= phba->sli.last_iotag)
				abort_iocb =
					phba->sli.iocbq_lookup[abort_iotag];
		} else
			/* For sli4 the abort_tag is the XRI,
			 * so the abort routine puts the iotag of the iocb
			 * being aborted in the context field of the abort
			 * IOCB.
			 */
			abort_iocb = phba->sli.iocbq_lookup[abort_context];

		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb %p "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);

		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0139 Ignoring ELS cmd tag x%x completion Data: "
			"x%x x%x x%x\n",
			irsp->ulpIoTag, irsp->ulpStatus,
			irsp->un.ulpWord[4], irsp->ulpTimeout);
	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
		lpfc_ct_free_iocb(phba, cmdiocb);
	else
		lpfc_els_free_iocb(phba, cmdiocb);
	return;
}

/**
 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Other than the case the outstanding command iocb is an abort
 * request, this function issues abort out unconditionally. This function is
 * called with hbalock held. The function returns 0 when it fails due to
 * memory allocation failure or when the command iocb is an abort request.
 **/
static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;
	int ring_number;
	int retval;
	unsigned long iflags;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* This signals the response to set the correct status
	 * before calling the completion handler
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	iabt = &abtsiocbp->iocb;
	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.acxri.abortContextTag = icmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
	} else
		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
	iabt->ulpLe = 1;
	iabt->ulpClass = icmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
		abtsiocbp->iocb_flag |= LPFC_IO_FOF;

	if (phba->link_state >= LPFC_LINK_UP)
		iabt->ulpCommand = CMD_ABORT_XRI_CN;
	else
		iabt->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
	abtsiocbp->vport = vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0339 Abort xri x%x, original iotag x%x, "
			 "abort cmd iotag x%x\n",
			 iabt->un.acxri.abortIoTag,
			 iabt->un.acxri.abortContextTag,
			 abtsiocbp->iotag);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		ring_number =
			lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
		if (unlikely(ring_number == LPFC_HBA_ERROR))
			return 0;
		pring = &phba->sli.ring[ring_number];
		/* Note: both hbalock and ring_lock need to be set here */
		spin_lock_irqsave(&pring->ring_lock, iflags);
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
			abtsiocbp, 0);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	} else {
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
			abtsiocbp, 0);
	}

	if (retval)
		__lpfc_sli_release_iocbq(phba, abtsiocbp);

	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}

/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function shall be changed for those commands
 * so that nothing happens when they finish. This function is called with
 * hbalock held. The function returns 0 when the command iocb is an abort
 * request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	int retval = IOCB_ERROR;
	IOCB_t *icmd = NULL;

	/*
	 * There are certain command types we don't want to abort. And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* Now, we try to issue the abort to the cmdiocb out */
	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly. This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}

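/*
 * Caller sketch (hypothetical, not driver code): this routine expects
 * hbalock to be held and only schedules the ABTS; the victim iocb still
 * completes through its own handler, so the caller must not free it here:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	ret = lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irq(&phba->hbalock);
 *	if (ret == IOCB_ERROR)
 *		... escalate (e.g. ring flush), but do not free iocb ...
 */
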
/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	int i;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}

/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria is met for the given iocb and will return
 * 1 if the filtering criteria is not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by vport, tgt_id and
 * lun_id parameter.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	int rc = 1;

	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
		return rc;

	if (iocbq->vport != vport)
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
			__func__, ctx_cmd);
		break;
	}

	return rc;
}

/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
 * commands pending on the vport associated with SCSI device specified
 * by tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
 * commands pending on the vport associated with SCSI target specified
 * by tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
		  lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int sum, i;

	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
						ctx_cmd) == 0)
			sum++;
	}

	return sum;
}

/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3096 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"status 0x%x, reason 0x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
			rspiocb->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		/* indicate the IO is being aborted by the driver. */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
		else
			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocb->iocb_flag |= LPFC_IO_FOF;

		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
					      abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}

/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters.
 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
 **/
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4;
	uint32_t ring_number;

	spin_lock_irq(&phba->hbalock);

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (abtsiocbq == NULL)
			continue;

		icmd = &iocbq->iocb;
		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocbq->iocb.un.acxri.abortIoTag =
							 iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
		abtsiocbq->iocb.ulpLe = 1;
		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
		abtsiocbq->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocbq->iocb_flag |= LPFC_IO_FOF;

		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		ndlp = lpfc_cmd->rdata->pnode;

		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			ring_number = MAX_SLI3_CONFIGURED_RINGS +
					 iocbq->fcp_wqidx;
			pring_s4 = &phba->sli.ring[ring_number];
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock_irqsave(&pring_s4->ring_lock, iflags);
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}

		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irq(&phba->hbalock);
	return sum;
}

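/*
 * Design note (sketch, not driver code): unlike lpfc_sli_abort_iocb above,
 * this routine walks the whole iotag table under a single hbalock hold, so
 * it must use the lock-free __lpfc_sli_get_iocbq()/__lpfc_sli_issue_iocb()
 * variants; calling the locked lpfc_sli_get_iocbq() here would self-deadlock
 * on the non-recursive hbalock.
 */
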
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_scsi_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * A time out has occurred for the iocb. If a time out
		 * completion handler has been supplied, call it. Otherwise,
		 * just free the iocbq.
		 */

		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
			cur_iocbq);
		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}

/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		 struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb object shall be
 * used to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupt disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	unsigned long iflags;
	bool iocb_completed = true;

	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or it's an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out. Inform the wake iocb wait
			 * completion function and set local status
			 */

			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a success
			 * status or not - that's for the caller to check.
			 * IOCB_SUCCESS means just that the command was sent and
			 * completed. Not that it completed successfully.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}

/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. Caller
 * should not free the mailbox resources, if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupt disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	MAILBOX_t *mb = NULL;
	int retval;
	unsigned long flag;

	/* The caller might set context1 for extended buffer */
	if (pmboxq->context1)
		mb = (MAILBOX_t *)pmboxq->context1;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	} else {
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
	}

	return retval;
}

/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 * @mbx_action: Mailbox shutdown action (LPFC_MBX_WAIT or LPFC_MBX_NO_WAIT).
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as being in a blocked state to
 * prevent asynchronous mailbox commands from being issued off the pending
 * mailbox command queue. If the mailbox command sub-system shutdown is due
 * to HBA error conditions such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to normal condition (such
 * as with offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command.
				 */
				break;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_sli_mbox_sys_flush(phba);
}

/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}

/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}

/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}

/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state that
 * interrupt should be handled, otherwise -EIO.
 */
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}

/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				(HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if a deferred error condition
			 * is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->context2)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->context2,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->context1);
						ndlp = (struct lpfc_nodelist *)
							pmb->context2;

						/* Reg_LOGIN of dflt RPI was
						 * successful. Now let's get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->context1 = mp;
						pmb->context2 = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have"
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0349 rc should be "
						"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */

/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */

/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
}  /* lpfc_sli_intr_handler */
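/*
 * Illustrative sketch (not part of the driver): a device-level handler
 * such as lpfc_sli_intr_handler() is typically registered with
 * request_irq() during attach so the PCI layer can deliver INTx/MSI
 * interrupts to it.  lpfc_example_setup_intx() is a hypothetical name
 * used only for this sketch; request_irq() is the real kernel API.
 */
#if 0	/* example only, not compiled */
static int lpfc_example_setup_intx(struct lpfc_hba *phba)
{
	/* Shared INTx/MSI: one vector; the device-level handler itself
	 * fans out to the slow-path and fast-path attention handlers.
	 */
	return request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			   IRQF_SHARED, LPFC_DRIVER_NAME, phba);
}
#endif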
/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 FCP abort XRI events.
 **/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl  = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
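/*
 * Illustrative note (not driver code): the memcpy above uses the
 * offsetof() idiom to copy only the tail of struct lpfc_iocbq, from the
 * embedded 'iocb' member onward, so the list linkage and bookkeeping
 * fields at the head of the response iocbq are left untouched.  A
 * standalone sketch of the idiom with a hypothetical structure:
 */
#if 0	/* example only, not compiled */
struct example {
	struct list_head list;	/* preserved in the destination */
	int payload[4];		/* copied from the source */
};

static void copy_tail(struct example *dst, const struct example *src)
{
	size_t off = offsetof(struct example, payload);

	/* Copy everything from 'payload' to the end of the structure */
	memcpy((char *)dst + off, (const char *)src + off,
	       sizeof(struct example) - off);
}
#endif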
/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}
/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into an asynchronous event entry */
	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a mailbox
 * completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->context1);
			ndlp = (struct lpfc_nodelist *)pmb->context2;
			/* Reg_LOGIN of dflt RPI was successful. Now lets get
			 * RID of the PPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->context1 = mp;
			pmb->context2 = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
out_no_mqe_complete:
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox completion handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	int fcp_txcmplq_cnt = 0;

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
			fcp_txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
			txq_cnt, phba->iocb_cnt,
			fcp_txcmplq_cnt,
			txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking the
 * proper WQ release routine on the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* sanity check on queue memory */
	if (unlikely(!phba->sli4_hba.els_wq))
		return;
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into the proper xri abort event list */
	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
	switch (cq->subtype) {
	case LPFC_FCP:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
		/* Set the fcp xri abort event flag */
		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_ELS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid work queue CQE subtype (x%x)\n",
				cq->subtype);
		workposted = false;
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		hrq->RQ_buf_trunc++;
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		lpfc_sli4_rq_release(hrq, drq);
		spin_lock_irqsave(&phba->hbalock, iflags);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive-queue completion
 * queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event queue.
 * It will check the MajorCode and MinorCode to determine whether this is for
 * a completion event on a completion queue; if not, an error shall be logged
 * and the routine just returns. Otherwise, it will get to the corresponding
 * completion queue and process all the entries on that completion queue,
 * rearm the completion queue, and then return.
 *
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
	struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ecount = 0;
	uint16_t cqid;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Process all the entries to the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
			if (!(++ecount % cq->entry_repost))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
			cq->CQ_mbox++;
		}
		break;
	case LPFC_WCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			if (cq->subtype == LPFC_FCP)
				workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq,
								       cqe);
			else
				workposted |= lpfc_sli4_sp_handle_cqe(phba, cq,
								      cqe);
			if (!(++ecount % cq->entry_repost))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}

		/* Track the max number of CQEs processed in 1 EQ */
		if (ecount > cq->CQ_max_cqe)
			cq->CQ_max_cqe = ecount;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Catch the no cq entry condition, log an error */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0371 No entry from the CQ: identifier "
				"(x%x), type (%d)\n", cq->queue_id, cq->type);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
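/*
 * Illustrative note (not driver code): the "(++ecount % entry_repost)"
 * test in the CQ loops above batches doorbell writes.  Every
 * entry_repost consumed entries the CQEs are released back to the HBA
 * without re-arming (LPFC_QUEUE_NOARM) so the hardware can reuse the
 * slots, and only the final release re-arms the queue
 * (LPFC_QUEUE_REARM) to generate the next interrupt.  Sketch of the
 * pattern, with handle_one_cqe() as a hypothetical per-entry handler:
 */
#if 0	/* example only, not compiled */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= handle_one_cqe(phba, cq, cqe);
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);	/* final: re-arm */
#endif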
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0373 FCP complete error: status=x%x, "
				"hw_status=x%x, total_data_specified=%d, "
				"parameter=x%x, word3=x%x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed, wcqe->parameter,
				wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
	if (unlikely(!cmdiocbq->iocb_cmpl)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine on the fast-path WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t fcp_wqid;

	/* Check for fast-path FCP work queue release */
	fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == fcp_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
}
/**
 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 **/
static int
lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the fast-path event queue.
 * It will check the MajorCode and MinorCode to determine whether this is for
 * a completion event on a completion queue; if not, an error shall be logged
 * and the routine just returns. Otherwise, it will get to the corresponding
 * completion queue and process all the entries on the completion queue, rearm
 * the completion queue, and then return.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			uint32_t qidx)
{
	struct lpfc_queue *cq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	uint16_t cqid;
	int ecount = 0;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Check if this is a Slow path event */
	if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
			phba->sli4_hba.hba_eq[qidx]);
		return;
	}

	if (unlikely(!phba->sli4_hba.fcp_cq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3146 Fast-path completion queues "
				"does not exist\n");
		return;
	}
	cq = phba->sli4_hba.fcp_cq[qidx];
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0367 Fast-path completion queue "
					"(%d) does not exist\n", qidx);
		return;
	}

	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Process all the entries to the CQ */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (ecount > cq->CQ_max_cqe)
		cq->CQ_max_cqe = ecount;

	/* Catch the no cq entry condition */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0369 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;

	/* walk all the EQ entries and drop on the floor */
	while ((eqe = lpfc_sli4_eq_get(eq)))
		;

	/* Clear and re-arm the EQ */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
/**
 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
 *			     entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the Flash Optimized Fabric
 * event queue. It will check the MajorCode and MinorCode to determine whether
 * this is for a completion event on a completion queue; if not, an error
 * shall be logged and the routine just returns. Otherwise, it will get to the
 * corresponding completion queue and process all the entries on the
 * completion queue, rearm the completion queue, and then return.
 **/
static void
lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	uint16_t cqid;
	int ecount = 0;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"9147 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Next check for OAS */
	cq = phba->sli4_hba.oas_cq;
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"9148 OAS completion queue "
					"does not exist\n");
		return;
	}

	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"9149 Miss-matched fast-path compl "
				"queue id: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Process all the entries to the OAS CQ */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (ecount > cq->CQ_max_cqe)
		cq->CQ_max_cqe = ecount;

	/* Catch the no cq entry condition */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"9153 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
 * IOCB ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. Flash Optimized Fabric ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the EQ and CQ are in a one-to-one map such that the EQ index is
 * equal to that of the CQ index.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_queue *eq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;

	/* Get the driver's phba structure from the dev_id */
	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
	phba = fcp_eq_hdl->phba;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	eq = phba->sli4_hba.fof_eq;
	if (unlikely(!eq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		eq->EQ_badstate++;
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, eq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on FCP fast-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(eq))) {
		lpfc_sli4_fof_handle_eqe(phba, eqe);
		if (!(++ecount % eq->entry_repost))
			lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
		eq->EQ_processed++;
	}

	/* Track the max number of EQEs processed in 1 intr */
	if (ecount > eq->EQ_max_eqe)
		eq->EQ_max_eqe = ecount;

	if (unlikely(ecount == 0)) {
		eq->EQ_no_entry++;

		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"9145 MSI-X interrupt with no EQE\n");
		else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"9146 ISR interrupt with no EQE\n");
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
		}
	}
	/* Always clear and re-arm the fast-path EQ */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
	return IRQ_HANDLED;
}
/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQ and FCP CQ are in a one-to-one map such that the FCP EQ index
 * is equal to that of the FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;
	int fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
	phba = fcp_eq_hdl->phba;
	fcp_eqidx = fcp_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hba_eq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
	if (unlikely(!fpeq))
		return IRQ_NONE;

	if (lpfc_fcp_look_ahead) {
		if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
			lpfc_sli4_eq_clr_intr(fpeq);
		else {
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
			return IRQ_NONE;
		}
	}

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		fpeq->EQ_badstate++;
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (lpfc_fcp_look_ahead)
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on FCP fast-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
		if (eqe == NULL)
			break;

		lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
		if (!(++ecount % fpeq->entry_repost))
			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
		fpeq->EQ_processed++;
	}

	/* Track the max number of EQEs processed in 1 intr */
	if (ecount > fpeq->EQ_max_eqe)
		fpeq->EQ_max_eqe = ecount;

	/* Always clear and re-arm the fast-path EQ */
	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;

		if (lpfc_fcp_look_ahead) {
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
			return IRQ_NONE;
		}

		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	if (lpfc_fcp_look_ahead)
		atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
	return IRQ_HANDLED;
} /* lpfc_sli4_fp_intr_handler */
/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	if (phba->cfg_fof) {
		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
					&phba->sli4_hba.fcp_eq_hdl[0]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
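/*
 * Illustrative sketch (not part of the driver): under MSI-X each
 * fast-path EQ gets its own vector, registered directly to
 * lpfc_sli4_hba_intr_handler() with a per-vector lpfc_fcp_eq_hdl as the
 * cookie, while under INTx/MSI a single vector goes to
 * lpfc_sli4_intr_handler(), which loops over the handles itself as
 * shown above.  lpfc_example_setup_msix() and the msix_entries layout
 * used here are assumptions of this sketch only.
 */
#if 0	/* example only, not compiled */
static int lpfc_example_setup_msix(struct lpfc_hba *phba, int nvec)
{
	int idx, rc;

	for (idx = 0; idx < nvec; idx++) {
		phba->sli4_hba.fcp_eq_hdl[idx].idx = idx;
		phba->sli4_hba.fcp_eq_hdl[idx].phba = phba;
		rc = request_irq(phba->sli4_hba.msix_entries[idx].vector,
				 lpfc_sli4_hba_intr_handler, 0,
				 LPFC_DRIVER_NAME,
				 &phba->sli4_hba.fcp_eq_hdl[idx]);
		if (rc)
			return rc;
	}
	return 0;
}
#endif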
/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	kfree(queue);
	return;
}
/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
		      uint32_t entry_count)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	int x, total_qe_count;
	void *dma_pointer;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	queue = kzalloc(sizeof(struct lpfc_queue) +
			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
	if (!queue)
		return NULL;
	queue->page_count = (ALIGN(entry_size * entry_count,
			hw_page_size))/hw_page_size;
	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
						   hw_page_size, &dmabuf->phys,
						   GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* initialize queue's entry array */
		dma_pointer = dmabuf->virt;
		for (; total_qe_count < entry_count &&
		     dma_pointer < (hw_page_size + dmabuf->virt);
		     total_qe_count++, dma_pointer += entry_size) {
			queue->qe[total_qe_count].address = dma_pointer;
		}
	}
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;

	/*
	 * entry_repost is calculated based on the number of entries in the
	 * queue. This works out except for RQs. If buffers are NOT initially
	 * posted for every RQE, entry_repost should be adjusted accordingly.
	 */
	queue->entry_repost = (entry_count >> 3);
	if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
		queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
	queue->phba = phba;

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
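/*
 * Illustrative note (not driver code): the page-count math above rounds
 * the total queue footprint up to whole hardware pages.  For example,
 * with entry_size = 64, entry_count = 256 and a 4KB hw_page_size,
 * ALIGN(64 * 256, 4096) / 4096 = 4 pages, and entry_repost becomes
 * 256 >> 3 = 32 (or LPFC_QUEUE_MIN_REPOST if that constant is larger).
 */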
/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 */
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}
/**
 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
 * @phba: HBA structure that indicates port to create a queue on.
 * @startq: The starting FCP EQ to modify
 *
 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @startq
 * is used to get the starting FCP EQ to change. The mailbox command is
 * issued with MBX_POLL, so this function is synchronous and waits for
 * the command to finish before returning.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the mailbox command fails
 * this function will return -ENXIO.
 **/
uint32_t
lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt, rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t result;
	int fcp_eqidx;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;

	if (startq >= phba->cfg_fcp_io_channel)
		return 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
	if (result > LPFC_DMULT_CONST)
		dmult = 0;
	else
		dmult = LPFC_DMULT_CONST/result - 1;

	cnt = 0;
	for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
	     fcp_eqidx++) {
		eq = phba->sli4_hba.hba_eq[fcp_eqidx];
		if (!eq)
			continue;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;
		cnt++;
		if (cnt >= LPFC_MAX_EQ_DELAY)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
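/*
 * Illustrative note (not driver code): the delay multiplier above is
 * derived from the target interrupt rate per EQ.  For example, assuming
 * cfg_fcp_imax = 40000 interrupts/sec spread over 4 io channels,
 * result = 10000; and assuming LPFC_DMULT_CONST is 651042 (the value in
 * contemporary lpfc_sli4.h headers; treat it as an assumption of this
 * sketch), dmult = 651042 / 10000 - 1 = 64.  Larger dmult values slow
 * the EQ's interrupt coalescing timer and so lower the interrupt rate.
 */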
/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. The mailbox command is issued with MBX_POLL, so this function
 * is synchronous and waits for the command to finish before returning.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->hba_index = 0;

	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
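/*
 * Illustrative sketch (not part of the driver): lpfc_sli4_queue_alloc()
 * and lpfc_eq_create() are used as a pair; the host-side structure and
 * its DMA pages must exist before the EQ_CREATE mailbox command posts
 * them to the port.  lpfc_example_make_eq() and the entry count used
 * here are assumptions of this sketch, not the driver's configuration.
 */
#if 0	/* example only, not compiled */
static struct lpfc_queue *lpfc_example_make_eq(struct lpfc_hba *phba)
{
	struct lpfc_queue *eq;

	/* Allocate host memory for a 1024-entry EQ of 4-byte EQEs */
	eq = lpfc_sli4_queue_alloc(phba, LPFC_EQE_SIZE_4B, 1024);
	if (!eq)
		return NULL;
	/* Post the queue pages to the port via the EQ_CREATE mailbox */
	if (lpfc_eq_create(phba, eq, phba->cfg_fcp_imax)) {
		lpfc_sli4_queue_free(eq);
		return NULL;
	}
	return eq;
}
#endif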
/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: The queue's type.
 * @subtype: The queue's subtype.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba, by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @cq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to.
 * The mailbox is issued in polled mode, so this function waits for the
 * CQ_CREATE command to finish before returning.
 *
 * On success this function returns zero. If it is unable to allocate enough
 * memory this function returns -ENOMEM. If the queue create mailbox command
 * fails this function returns -ENXIO.
 **/
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!cq || !eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		/* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count. (%d)\n",
				cq->entry_count);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	cq->assoc_qid = eq->queue_id;
	cq->host_index = 0;
	cq->hba_index = 0;
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
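
/*
 * Usage sketch (illustrative only): a CQ must be bound to an existing EQ,
 * so creation is ordered EQ first, then CQ. The LPFC_WCQ/LPFC_ELS values
 * mirror what the queue-setup code elsewhere in the driver passes for the
 * ELS completion queue; treat the pairing as an assumption here.
 *
 *	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
 *			    phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
 *	if (rc)
 *		goto out_destroy_eq;	// hypothetical error label
 */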
/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides failback (fb) functionality when the mq_create_ext
 * fails on older FW generations. Its purpose is otherwise identical to
 * mq_create_ext.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}
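
/*
 * Failback flow sketch (illustrative only; this mirrors what
 * lpfc_mq_create() below actually does when older firmware rejects
 * MQ_CREATE_EXT): the helper rewrites the caller's @mbox in place from an
 * MQ_CREATE_EXT request into a plain MQ_CREATE request, which is then
 * reissued.
 *
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);	// MQ_CREATE_EXT
 *	if (rc != MBX_SUCCESS) {
 *		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);	// MQ_CREATE
 *	}
 */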
/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba, by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @mq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The
 * mailbox is issued in polled mode, so this function waits for the MQ_CREATE
 * command to finish before returning.
 *
 * On success this function returns zero. If it is unable to allocate enough
 * memory this function returns -ENOMEM. If the queue create mailbox command
 * fails this function returns -ENXIO.
 **/
int
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
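
/*
 * Usage sketch (illustrative only): the driver creates a single mailbox
 * queue against the slow-path CQ during SLI-4 setup. The field and subtype
 * names below are taken from the queue-setup code as an assumption.
 *
 *	rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
 *			    phba->sli4_hba.mbx_cq, LPFC_MBOX);
 */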
/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba, by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @wq
 * struct is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. The
 * mailbox is issued in polled mode, so this function waits for the WQ_CREATE
 * command to finish before returning.
 *
 * On success this function returns zero. If it is unable to allocate enough
 * memory this function returns -ENOMEM. If the queue create mailbox command
 * fails this function returns -ENXIO.
 **/
int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);

	/* wqv is the earliest version supported, NOT the latest */
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	switch (phba->sli4_hba.pc_sli4_params.wqv) {
	case LPFC_Q_CREATE_VERSION_0:
		switch (wq->entry_size) {
		default:
		case 64:
			/* Nothing to do, version 0 ONLY supports 64 byte */
			page = wq_create->u.request.page;
			break;
		case 128:
			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
			    LPFC_WQ_SZ128_SUPPORT)) {
				status = -ERANGE;
				goto out;
			}
			/* If we get here the HBA MUST also support V1 and
			 * we MUST use it
			 */
			bf_set(lpfc_mbox_hdr_version, &shdr->request,
			       LPFC_Q_CREATE_VERSION_1);

			bf_set(lpfc_mbx_wq_create_wqe_count,
			       &wq_create->u.request_1, wq->entry_count);
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			bf_set(lpfc_mbx_wq_create_page_size,
			       &wq_create->u.request_1,
			       (PAGE_SIZE/SLI4_PAGE_SIZE));
			page = wq_create->u.request_1.page;
			break;
		}
		break;
	case LPFC_Q_CREATE_VERSION_1:
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
				LPFC_WQ_SZ128_SUPPORT)) {
				status = -ERANGE;
				goto out;
			}
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
		break;
	default:
		status = -ERANGE;
		goto out;
	}
	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
				       &wq_create->u.response);
		if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (wq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3265 WQ[%d] doorbell format not "
					"supported: x%x\n", wq->queue_id,
					wq->db_format);
			status = -EINVAL;
			goto out;
		}
		pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
				    &wq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3263 WQ[%d] failed to memmap pci "
					"barset:x%x\n", wq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}
		db_offset = wq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3252 WQ[%d] doorbell offset not "
					"supported: x%x\n", wq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		wq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3264 WQ[%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", wq->queue_id, pci_barset,
				db_offset, wq->db_format);
	} else {
		wq->db_format = LPFC_DB_LIST_FORMAT;
		wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
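
/*
 * Usage sketch (illustrative only): work queues are created against their
 * completion queues once those exist; the ELS pairing shown is an
 * assumption modeled on the driver's queue-setup path.
 *
 *	rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
 *			    phba->sli4_hba.els_cq, LPFC_ELS);
 */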
/**
 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @rq: The queue structure to use for the receive queue.
 * @qno: The associated HBQ number
 *
 * For SLI4 we need to adjust the RQ repost value based on
 * the number of buffers that are initially posted to the RQ.
 **/
void
lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
{
	uint32_t cnt;

	/* sanity check on queue memory */
	if (!rq)
		return;
	cnt = lpfc_hbq_defs[qno]->entry_count;

	/* Recalc repost for RQs based on buffers initially posted */
	cnt = (cnt >> 3);
	if (cnt < LPFC_QUEUE_MIN_REPOST)
		cnt = LPFC_QUEUE_MIN_REPOST;

	rq->entry_repost = cnt;
}
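
/*
 * Worked example (assuming the >>3 scaling above): an HBQ defined with 512
 * entries yields a repost threshold of 512 >> 3 = 64, while one defined
 * with 64 entries computes 8 and is raised to LPFC_QUEUE_MIN_REPOST.
 */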
/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this receive queue pair to.
 * @subtype: The queue's subtype.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba, by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send the mailbox command to the HBA. The @drq
 * and @hrq structs are used to get the entry count that is necessary to
 * determine the number of pages to use for this queue. The @cq is used to
 * indicate which completion queue to bind received buffers that are posted
 * to these queues to. The mailbox is issued in polled mode, so this function
 * waits for the RQ_CREATE command to finish before returning.
 *
 * On success this function returns zero. If it is unable to allocate enough
 * memory this function returns -ENOMEM. If the queue create mailbox command
 * fails this function returns -ENXIO.
 **/
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (drop through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;

	/* now create the data queue */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (drop through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
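
/*
 * Usage sketch (illustrative only): header and data RQs are always created
 * as a pair against one CQ, and their entry counts must match or the
 * function bails out before building the mailbox.
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 */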
/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function returns zero. If the queue destroy mailbox
 * command fails this function returns -ENXIO.
 **/
int
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}
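
/*
 * Teardown sketch (illustrative only): queues are destroyed in the reverse
 * of creation order -- children (WQ/RQ/MQ) first, then their CQs, then the
 * EQs -- since each destroy routine only unlinks the queue passed to it.
 * The field names below are assumptions modeled on the unset path.
 *
 *	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 *	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 *	lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[0]);
 */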
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function returns zero. If the queue destroy mailbox
 * command fails this function returns -ENXIO.
 **/
int
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq)
		return -ENODEV;
	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader.
	 * (cfg_shdr sits at the same offset in every member of the union.)
	 */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove cq from any list */
	list_del_init(&cq->list);
	mempool_free(mbox, cq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function returns zero. If the queue destroy mailbox
 * command fails this function returns -ENXIO.
 **/
int
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq, by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function returns zero. If the queue destroy mailbox
 * command fails this function returns -ENXIO.
 **/
int
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!wq)
		return -ENODEV;
	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove wq from any list */
	list_del_init(&wq->list);
	mempool_free(mbox, wq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @hrq: The header receive queue structure associated with the pair to destroy.
 * @drq: The data receive queue structure associated with the pair to destroy.
 *
 * This function destroys a receive queue pair, as detailed in @hrq and @drq,
 * by sending a mailbox command, specific to the type of queue, to the HBA.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues
 * to destroy.
 *
 * On success this function returns zero. If the queue destroy mailbox
 * command fails this function returns -ENXIO.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If you are going to map 2 SGLs then the first one must have 256 entries
 * and the second one can have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		   dma_addr_t pdma_phys_addr0,
		   dma_addr_t pdma_phys_addr1,
		   uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			sizeof(struct lpfc_mbx_post_sgl_pages) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		return -ENXIO;
	}
	return 0;
}
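
/*
 * Usage sketch (illustrative only): registering a two-page SGL for an
 * already-allocated xritag; pdma0/pdma1 are hypothetical names for the DMA
 * addresses of the first and second SGL pages.
 *
 *	rc = lpfc_sli4_post_sgl(phba, pdma0, pdma1, xritag);
 *	if (rc)
 *		lpfc_sli4_free_xri(phba, xritag);
 */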
/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available logical xri from
 * the driver's xri bitmask, consistent with the SLI-4 interface spec.
 *
 * Returns
 * The allocated logical xri if successful.
 * NO_XRI if no xris are available.
 **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	uint16_t xri;

	/*
	 * Fetch the next logical xri.  Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	}
	set_bit(xri, phba->sli4_hba.xri_bmask);
	phba->sli4_hba.max_cfg_param.xri_used++;

	spin_unlock_irq(&phba->hbalock);
	return xri;
}
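
/*
 * Usage sketch (illustrative only): allocate a logical xri and release it
 * again on a later failure path.
 *
 *	xri = lpfc_sli4_alloc_xri(phba);
 *	if (xri == NO_XRI)
 *		return -ENOMEM;		// assumed error mapping
 *	...
 *	lpfc_sli4_free_xri(phba, xri);
 */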
/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver. The caller is expected
 * to hold the hbalock.
 **/
void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
		phba->sli4_hba.max_cfg_param.xri_used--;
	}
}
/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver, taking the hbalock itself.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it returns NO_XRI (0xffff), which is not a valid xritag; otherwise it
 * returns the allocated xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI.last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}
/**
 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
static int
lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
			    struct list_head *post_sgl_list,
			    int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
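
/*
 * Sizing note (values assumed for illustration): each posted sgl entry
 * contributes one 16-byte sgl_page_pairs descriptor, so with a 4KB
 * SLI4_PAGE_SIZE the non-embedded request can carry roughly
 * (4096 - sizeof(union lpfc_sli4_cfg_shdr) - sizeof(uint32_t)) / 16
 * entries before the reqlen check above rejects the post; larger ELS
 * pools must therefore be posted in multiple blocks.
 */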
/**
 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @sblist: pointer to scsi buffer list.
 * @count: number of scsi buffers on the list.
 *
 * This routine is invoked to post a block of @count scsi sgl pages from a
 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 *
 * Returns zero on success; -ENOMEM or -ENXIO on failure.
 **/
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
			      struct list_head *sblist,
			      int count)
{
	struct lpfc_scsi_buf *psb;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0217 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0283 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2561 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(psb, sblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = psb->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2564 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	/* make rctl_names static to save stack space */
	static char *rctl_names[] = FC_RCTL_NAMES_INIT;
	char *type_names[] = FC_TYPE_NAMES_INIT;
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:	/* extended link services request */
	case FC_RCTL_ELS_REP:	/* extended link services reply */
	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:	/* basic link service NOP */
	case FC_RCTL_BA_ABTS:	/* basic link service abort */
	case FC_RCTL_BA_RMC:	/* remove connection */
	case FC_RCTL_BA_ACC:	/* basic accept */
	case FC_RCTL_BA_RJT:	/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:	/* acknowledge_1 */
	case FC_RCTL_ACK_0:	/* acknowledge_0 */
	case FC_RCTL_P_RJT:	/* port reject */
	case FC_RCTL_F_RJT:	/* fabric reject */
	case FC_RCTL_P_BSY:	/* port busy */
	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
	case FC_RCTL_LCR:	/* link credit reset */
	case FC_RCTL_END:	/* end */
		break;
	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:%s (x%x), type:%s (x%x), "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
			type_names[fc_hdr->fh_type], fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:%s type:%s\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type]);
	return 1;
}
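
/*
 * Usage sketch (illustrative only): the unsolicited-receive path would
 * call this check before any further processing and drop frames that
 * fail it; the cleanup call shown is an assumption.
 *
 *	if (lpfc_fc_frame_check(phba, fc_hdr)) {
 *		lpfc_in_buf_free(phba, &dmabuf->dbuf);
 *		return;
 *	}
 */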
/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if one exists
 * or 0 if no VSAN Header exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}
/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match the frame to
 * a vport.
 **/
14694 static struct lpfc_vport
*
14695 lpfc_fc_frame_to_vport(struct lpfc_hba
*phba
, struct fc_frame_header
*fc_hdr
,
14698 struct lpfc_vport
**vports
;
14699 struct lpfc_vport
*vport
= NULL
;
14701 uint32_t did
= (fc_hdr
->fh_d_id
[0] << 16 |
14702 fc_hdr
->fh_d_id
[1] << 8 |
14703 fc_hdr
->fh_d_id
[2]);
14705 if (did
== Fabric_DID
)
14706 return phba
->pport
;
14707 if ((phba
->pport
->fc_flag
& FC_PT2PT
) &&
14708 !(phba
->link_state
== LPFC_HBA_READY
))
14709 return phba
->pport
;
14711 vports
= lpfc_create_vport_work_array(phba
);
14712 if (vports
!= NULL
)
14713 for (i
= 0; i
<= phba
->max_vpi
&& vports
[i
] != NULL
; i
++) {
14714 if (phba
->fcf
.fcfi
== fcfi
&&
14715 vports
[i
]->vfi
== lpfc_fc_hdr_get_vfi(fc_hdr
) &&
14716 vports
[i
]->fc_myDID
== did
) {
14721 lpfc_destroy_vport_work_array(phba
, vports
);
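
/*
 * For illustration: the 24-bit FC destination ID is rebuilt from the
 * three big-endian bytes of the header, so a frame carrying
 * fh_d_id = { 0xFF, 0xFF, 0xFE } yields did == 0xFFFFFE, which matches
 * Fabric_DID and short-circuits the search to the physical port above.
 */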

/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}

/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}

/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
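
/*
 * A minimal sketch of the timeout test used above: with fc_edtov in
 * milliseconds (typically 2000), a sequence whose newest frame arrived
 * at time_stamp is considered expired once
 *
 *	jiffies >= time_stamp + msecs_to_jiffies(fc_edtov)
 *
 * time_before() is used rather than a plain compare so the test stays
 * correct when the jiffies counter wraps.
 */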

/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport the frame was received on.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			return seq_dmabuf;
		}
	}
	return NULL;
}
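
/*
 * A minimal sketch of the matching rule used above: two frames belong to
 * the same sequence only when the (SEQ_ID, OX_ID, S_ID) triple is equal,
 * conceptually
 *
 *	same_seq = (a->fh_seq_id == b->fh_seq_id) &&
 *		   (a->fh_ox_id  == b->fh_ox_id)  &&
 *		   !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
 *
 * for two struct fc_frame_header pointers a and b. Data frames within a
 * matched sequence are then kept ordered by be16_to_cpu(fh_seq_cnt).
 */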

/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it frees up all
 * the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if there is a matching partially assembled sequence present and
 *          all the frames are freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the assembled sequence from the upper level
 * protocol, described by the information from the basic abort @dmabuf. It
 * checks whether such a pending context exists at the upper level protocol.
 * If so, it shall clean up the pending context.
 *
 * Return
 * true  -- if there is a matching pending context of the sequence cleaned
 *          at ulp;
 * false -- if there is no matching pending context of the sequence present
 *          at ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Register all caring upper level protocols to attend abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	struct lpfc_nodelist *ndlp;

	if (cmd_iocbq) {
		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
		lpfc_nlp_put(ndlp);
		lpfc_nlp_not_used(ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
			rsp_iocbq->iocb.ulpStatus,
			rsp_iocbq->iocb.un.ulpWord[4]);
}

/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates that the xri maps to the known range of XRIs
 * allocated and used by the driver.
 **/
uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}
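
/*
 * For illustration: xri_ids[] maps a logical XRI (the array index) to the
 * physical XRI value assigned by the port, so the scan above is the
 * reverse lookup "find lxri such that xri_ids[lxri] == xri", with NO_XRI
 * returned when the physical value is outside the driver's range.
 */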

/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @vport: pointer to a virtual port.
 * @fc_hdr: pointer to a FC frame header.
 * @aborted: whether the sequence handling was actually aborted.
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
static void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		lpfc_nlp_init(vport, ndlp, sid);
		/* Put ndlp onto pport node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "3275 Failed to active ndlp found "
					 "for oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	ctiocb->context1 = lpfc_nlp_get(ndlp);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort so we
		 * own the oxid.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri != NO_XRI)
		lpfc_set_rrq_active(phba, ndlp, lxri,
			(xri == oxid) ? rxid : oxid, 0);
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
	if (aborted == false) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
	}
	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->context1 = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}
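
/*
 * To summarize the response selection above:
 *
 *	abort origin          exchange state              response
 *	------------          --------------              --------
 *	responder (EX_CTX)    lxri within ELS range       BA_ACC
 *	responder (EX_CTX)    lxri beyond ELS range       BA_RJT
 *	initiator             sequence/ulp abort worked   BA_ACC
 *	initiator             nothing left to abort       BA_RJT
 *
 * Both BA_RJT paths carry reason code FC_BA_RJT_UNABLE with explanation
 * FC_BA_RJT_INV_XID in the bls_rsp words.
 */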

/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per-oxid status that the
 * unsolicited sequence has been aborted. After that, it will issue a basic
 * accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function checks for three things: 1) that the first frame
 * has a sequence count of zero; 2) that there is a frame with the last frame
 * of sequence bit set; 3) that there are no holes in the sequence count. The
 * function returns 1 when the sequence is complete, otherwise it returns 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
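
/*
 * For illustration: F_CTL is a 24-bit field carried as three bytes, so
 * the shifts above rebuild it as
 *
 *	fctl = (fh_f_ctl[0] << 16) | (fh_f_ctl[1] << 8) | fh_f_ctl[2];
 *
 * and FC_FC_END_SEQ tests the End_Sequence bit that the sender sets on
 * the last data frame of the sequence.
 */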

/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that were not
 * able to be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	tot_len = 0;
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption. Physical vpi. */
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->phba->vpi_ids[vport->vpi];
		/* put the first buffer into the first IOCBq */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;

		first_iocbq->iocb.un.rcvels.remoteID = sid;

		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			pbde = (struct ulp_bde64 *)
					&iocbq->iocb.unsli3.sli3Words[4];
			if (len > LPFC_DATA_BUF_SIZE)
				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			else
				pbde->tus.f.bdeSize = len;

			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
			tot_len += len;
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
							IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
							IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
			else
				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;

			tot_len += len;
			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;

			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}
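
/*
 * A sketch of the buffer-to-iocbq packing done above: each iocbq carries
 * at most two buffers, context2 and context3, each clamped to
 * LPFC_DATA_BUF_SIZE in its BDE size field:
 *
 *	iocbq 0: context2 = first frame's dbuf, context3 = dbuf 2
 *	iocbq 1: context2 = dbuf 3,             context3 = dbuf 4
 *	...
 *
 * A new iocbq is allocated only when the current one already has both
 * slots filled.
 */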

static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      &phba->sli.ring[LPFC_ELS_RING],
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}

/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to a dmabuf that describes the received frame.
 *
 * This function is called with no lock held. This function processes all
 * the received buffers and gives it to upper layers when a received buffer
 * indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers at interrupt contexts and adds
 * received dma buffers to the rb_pend_list queue and signals the worker
 * thread. The worker thread calls lpfc_sli4_handle_received_buffer, which
 * will call the appropriate receive function when the final frame in a
 * sequence is received.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	uint32_t did;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	if ((bf_get(lpfc_cqe_code,
		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);

	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
	if (!vport) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* d_id this frame is directed to */
	did = sli4_did_from_fc_hdr(fc_hdr);

	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
	    (did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
		if (!(vport->fc_flag & FC_PT2PT) ||
		    (phba->link_state == LPFC_HBA_READY)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
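
/*
 * The receive path above, end to end:
 *
 *	lpfc_fc_frame_check()       - drop malformed frames
 *	lpfc_fc_frame_to_vport()    - bind the frame to a vport (FCFI/VFI/DID)
 *	lpfc_fc_frame_add()         - stitch the frame into a pending sequence
 *	lpfc_seq_complete()         - wait for SEQ_CNT 0..N with END_SEQ set
 *	lpfc_sli4_send_seq_to_ulp() - hand the finished sequence to the ULP
 *
 * with BA_ABTS frames short-circuiting into the unsolicited abort handler.
 */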

/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when the driver is
 * sequential.
 *
 * Return codes
 *	0 - successful
 *	-EIO - The mailbox failed to complete successfully.
 *	When this error occurs, the driver is not guaranteed
 *	to have any rpi regions posted to the device and
 *	must either attempt to repost the regions or take a
 *	fatal error.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;
	uint16_t lrpi = 0;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources. A port reset only
		 * needs the headers posted.
		 */
		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
		    LPFC_RPI_RSRC_RDY)
			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];

		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

 exit:
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
	       LPFC_RPI_RSRC_RDY);
	return rc;
}

/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec. This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);

	/* Post the physical rpi to the port for this rpi header. */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available rpi from the
 * driver's rpi bitmask. When the allocation leaves the driver running low
 * on rpi resources, it posts another rpi header page to the port.
 *
 * Returns
 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi. Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0001 rpi:%x max:%x lim:%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
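
/*
 * For illustration: rpi allocation is a bitmap allocator under hbalock,
 * conceptually
 *
 *	rpi = find_next_zero_bit(bmask, rpi_limit, 0);
 *	if (rpi < rpi_limit)
 *		set_bit(rpi, bmask);
 *
 * paired with test_and_clear_bit() in __lpfc_sli4_free_rpi() below. The
 * low-water check then grows the backing rpi header pages before the
 * bitmap can be exhausted.
 */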

/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 * @rpi: the rpi to release.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver, taking the hbalock.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}

/**
 * lpfc_sli4_resume_rpi - Resume an rpi with the port
 * @ndlp: pointer to the node whose rpi is being resumed.
 * @cmpl: completion handler to invoke when the mailbox completes, if any.
 * @arg: argument handed back to the completion handler.
 *
 * This routine is invoked to issue a RESUME_RPI mailbox command to resume
 * a previously suspended rpi with the port for the given node.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Post all rpi memory regions to the port. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->context1 = arg;
		mboxq->context2 = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}

/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *    0 success
 *    -Evalue otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}

/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is the completion handler for the manual ADD_FCF_RECORD
 * mailbox command. It reports any mailbox failure other than an FCF
 * already being in use and then frees the nonembedded mailbox resources.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
	    (shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2558 ADD_FCF_RECORD mailbox failed with "
			"status x%x add_status x%x\n",
			shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0. This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}

/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
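
/*
 * For illustration: the VLAN bitmap packs one bit per VLAN id, eight per
 * byte, so a vlan_id of 100 sets
 *
 *	vlan_bitmap[100 / 8] = 1 << (100 % 8);	(byte 12, bit 4)
 *
 * matching the layout the port expects in the FCF record.
 */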

/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}

/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_check_next_fcf_pri_level
 * @phba: pointer to the lpfc_hba struct for this port.
 *
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level. Starting from the highest priority
 * to the lowest. The most likely FCF candidate will be in the highest
 * priority group. When this routine is called it searches the fcf_pri list
 * for the next lowest priority group and repopulates the rr_bmask with only
 * those fcf_indexes.
 * returns:
 * 1=success 0=failure
 **/
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
			LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
			"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
			sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * the 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * if next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * the 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
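
/*
 * A worked example of the repopulation above: with fcf_pri entries
 * (index 2, pri 1, FLOGI_FAILED), (index 5, pri 2), (index 7, pri 2),
 * the failed priority-1 entry is skipped, next_fcf_pri becomes 2, and
 * indices 5 and 7 are set in fcf_rr_bmask for the next roundrobin pass.
 * Only when every entry has FLOGI_FAILED are the failed flags cleared
 * and the bmask repopulated from the first priority group in the list.
 */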

/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals to the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return LPFC_FCOE_FCF_NEXT_NONE;
		else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"3063 Only FCF available idx %d, flag %x\n",
				next_fcf_index,
				phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
			return next_fcf_index;
		}
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
	    LPFC_FCF_FLOGI_FAILED)
		goto next_priority;

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
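
/*
 * For illustration: with current fcf_indx 60 and bits {12, 60} set in
 * fcf_rr_bmask, the search starts at 61, runs off the end of the table,
 * wraps to find bit 12, and returns it. If 60 were the only bit set, the
 * wrapped search would land back on 60 and the routine would fall into
 * the priority-level rescan above instead.
 */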

/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index of the fcf record to set.
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: index of the fcf record to clear.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;

	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to mailbox object.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
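
/*
 * Usage sketch (illustrative, not part of the driver): a caller reacting
 * to a DEAD FCF or CVL event kicks off the rescan and only needs to
 * distinguish "request issued" from "request could not be issued"
 * (-ENOMEM or -EIO above); the rest is handled asynchronously by
 * lpfc_mbx_cmpl_redisc_fcf_table().
 *
 *	if (lpfc_sli4_redisc_fcf_table(phba))
 *		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 *				"FCF rediscover request not issued\n");
 */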

/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when the driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}
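
/*
 * Usage sketch (illustrative, not part of the driver): the caller owns the
 * region 23 buffer, which must be DMP_RGN23_SIZE bytes; a return of 0
 * means nothing was read. The real rev-dependent dispatch between this
 * SLI3 variant and the SLI4 variant below is in lpfc_sli_read_link_ste().
 * "parse_region23" is a hypothetical helper.
 *
 *	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
 *	if (rgn23_data) {
 *		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
 *		if (data_size)
 *			parse_region23(rgn23_data, data_size);
 *		kfree(rgn23_data);
 *	}
 */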

/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				offset += rgn23_data[offset + 1] * 4 + 4;
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
}
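
/*
 * For context: the record layout the TLV walk above assumes. Each record
 * is a 4-byte header whose second byte counts the payload in 4-byte
 * words, which is why every skip advances by "length * 4 + 4". The struct
 * below is purely illustrative; it does not exist in the SLI headers.
 *
 *	struct region23_rec {
 *		uint8_t type;	// e.g. DRIVER_SPECIFIC_TYPE, PORT_STE_TYPE
 *		uint8_t length;	// payload length in 4-byte words
 *		uint8_t data[2];	// first payload bytes (id, state)
 *	};
 */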

/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Returns 0 if successful, and @offset will contain the new offset to use
 * for the next write.
 * Returns a negative value for error cases.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_WRITE_OBJECT,
			 sizeof(struct lpfc_mbx_wr_object) -
			 sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
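
/*
 * Usage sketch (illustrative, not part of the driver): a firmware download
 * loop drives lpfc_wr_object() chunk by chunk, passing @offset back in on
 * every call; the routine sets the eof bit itself once @size is reached.
 * "fw_size" and "dma_buffer_list" are assumed caller state, with the list
 * refilled with the next chunk of image data on each iteration (the
 * driver's real user of this routine is its firmware update path).
 *
 *	uint32_t offset = 0;
 *	int rc = 0;
 *
 *	while (offset < fw_size && !rc)
 *		rc = lpfc_wr_object(phba, &dma_buffer_list,
 *				    fw_size, &offset);
 */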

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
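
/*
 * Usage sketch (illustrative, not part of the driver): a Clear Virtual
 * Link handler cleans the stale REG_LOGIN/REG_VPI mailboxes out first so
 * their completions cannot race the restarted discovery, then re-initiates
 * discovery on the vport. The follow-up call shown is an assumption and
 * varies with the event being handled.
 *
 *	lpfc_cleanup_pending_mbox(vport);
 *	lpfc_initial_fdisc(vport);
 */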

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;
	uint32_t txq_cnt = 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n ",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return txq_cnt;
}
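
/*
 * Usage sketch (illustrative, not part of the driver): draining is only
 * useful once SGL/XRI resources have been returned, so a natural call
 * site is right after an ELS exchange releases its sglq back to the pool.
 *
 *	if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txq))
 *		lpfc_drain_txq(phba);
 */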