/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static int lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_cqe *);
static int lpfc_sli4_post_els_sgl_list(struct lpfc_hba *, struct list_head *,
				       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *, struct lpfc_eqe *,
				     uint32_t);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_wqe = q->qe[q->host_index].wqe;

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index)
		return -ENOMEM;

	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->entry_repost))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	/* ensure WQE bcopy flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
		bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}
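
/*
 * Usage sketch (illustrative only, not driver code): lpfc_sli4_wq_put()
 * must be called with the hbalock held. A caller would look roughly like
 * the following, where "wq", "wqe", "iflags" and "rc" are assumed local
 * names, not symbols defined here:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc)
 *		... queue full (-ENOMEM): retry later or fail the request ...
 */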
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
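
/*
 * Index-arithmetic sketch (illustrative): with entry_count = 4,
 * hba_index = 3 and index = 1, the do/while above wraps through
 * 3 -> 0 -> 1 and returns released = 2. The modulo keeps both indices
 * inside the circular queue without any special-case wrap code.
 */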
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = q->qe[q->host_index].mqe;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
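
/*
 * Pairing sketch (illustrative, names assumed): every successful
 * lpfc_sli4_mq_put() saves the mailbox pointer in q->phba->mbox, and the
 * completion path calls lpfc_sli4_mq_release() exactly once to clear it
 * and advance hba_index:
 *
 *	if (lpfc_sli4_mq_put(mq, mqe) == 0) {
 *		... later, on mailbox completion ...
 *		lpfc_sli4_mq_release(mq);	// returns 1 entry consumed
 *	}
 */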
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return
 * NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}
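
/*
 * Consumer-loop sketch (illustrative, local names assumed): interrupt
 * handlers drain an EQ by repeatedly calling lpfc_sli4_eq_get() and then
 * batch the valid-bit clearing and doorbell write into one release call:
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)))
 *		... handle the event queue entry ...
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 */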
/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
static inline void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
}
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q,
 * update the queue's internal hba index, and return the CQE. If no valid
 * CQEs are in the Queue (no more work to do), or the Queue is full of CQEs
 * that have been processed, but not popped back to the HBA then this routine
 * will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;
	uint32_t idx;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->hba_index + 1) % q->entry_count);
	if (idx == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = idx;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to
 * indicate that the host has finished processing the entries. The @arm
 * parameter indicates that the queue should be rearmed when ringing the
 * doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;
	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
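
/*
 * Consumer-loop sketch (illustrative, local names assumed): completion
 * queues are drained the same way as event queues, with the doorbell
 * write batched into one release call:
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq)))
 *		... handle the completion queue entry ...
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 */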
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the Receive queue.
 * @drqe: The Data Receive Queue Entry to put on the Receive queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on @hq and @dq. This function will then ring the Receive
 * Queue Doorbell to signal the HBA to start processing the Receive Queue
 * Entry. This function returns the index that the rqe was copied to if
 * successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	put_index = hq->host_index;
	temp_hrqe = hq->qe[hq->host_index].rqe;
	temp_drqe = dq->qe[dq->host_index].rqe;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->entry_repost)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->entry_repost);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return put_index;
}
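
/*
 * Pairing sketch (illustrative): the header and data RQs always move in
 * lock step, so one receive buffer is posted as a header RQE plus a data
 * RQE. See lpfc_sli_hbq_to_firmware_s4() later in this file for a real
 * caller; in outline ("hbuf"/"dbuf" physical addresses assumed):
 *
 *	hrqe.address_lo = putPaddrLow(hbuf_phys);
 *	hrqe.address_hi = putPaddrHigh(hbuf_phys);
 *	drqe.address_lo = putPaddrLow(dbuf_phys);
 *	drqe.address_hi = putPaddrHigh(dbuf_phys);
 *	put_index = lpfc_sli4_rq_put(hdr_rq, dat_rq, &hrqe, &drqe);
 */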
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq * iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!(phba->pport->load_flag & FC_UNLOADING)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
				rrq->nlp_DID == did){
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: The xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}

	/*
	 * set the active bit even if there is no mem available.
	 */
	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->ndlp = ndlp;
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;
	spin_lock_irqsave(&phba->hbalock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	phba->hba_flag |= HBA_RRQ_ACTIVE;
	if (empty)
		lpfc_worker_wake_up(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}
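
/*
 * Lifecycle sketch (illustrative, names assumed): an XRI that must not be
 * reused yet is marked active, tested while picking a new sglq, and later
 * cleared when the RRQ expires or completes:
 *
 *	lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
 *	...
 *	if (lpfc_test_rrq_active(phba, ndlp, xritag))
 *		... __lpfc_sli_get_sglq() skips this XRI and tries another ...
 *	...
 *	lpfc_clr_rrq_active(phba, xritag, rrq);	// timer/completion path
 */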
/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the ring lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is
 * not empty it returns a pointer to the newly allocated sglq object,
 * else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->context_un.ndlp;
	} else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) {
		if (piocbq->iocb_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->context_un.ndlp;
	} else {
		ndlp = piocbq->context1;
	}

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_lxritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq * iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&pring->ring_lock, iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_sgl_list);
			spin_unlock_irqrestore(&pring->ring_lock, iflag);

			/* Check if TXQ queue needs to be serviced */
			if (!list_empty(&pring->txq))
				lpfc_worker_wake_up(phba);
		}
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * the iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set in the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
				__func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
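
/*
 * Dispatch sketch (illustrative, "irsp" is an assumed local IOCB_t
 * pointer): ring event handlers use the returned type to route a
 * completed entry, roughly:
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand)) {
 *	case LPFC_SOL_IOCB:   ... match against txcmplq ...        break;
 *	case LPFC_UNSOL_IOCB: ... hand to unsolicited handlers ... break;
 *	case LPFC_ABORT_IOCB: ... complete the abort ...           break;
 *	default:              ... log and drop ...                 break;
 *	}
 */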
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_TXCMPLQ;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN) &&
	    (!(piocb->vport->load_flag & FC_UNLOADING))) {
		mod_timer(&piocb->vport->els_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t max_cmd_idx = pring->sli.sli3.numCiocb;

	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
	    (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
		pring->sli.sli3.next_cmdidx = 0;

	if (unlikely(pring->sli.sli3.local_getidx ==
		     pring->sli.sli3.next_cmdidx)) {

		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->sli.sli3.local_getidx,
					max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
int
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof(struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof(struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform
 * firmware that there is pending work to be done for this ring and requests
 * an interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */

	if (lpfc_is_link_up(phba) &&
	    (!list_empty(&pring->txq)) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffers that are in-flight */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
					hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function returns
 * 0 if the buffer was posted successfully, else a negative
 * error code.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero
 * if it successfully posts the buffer else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
				/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
				/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = rc;
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
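
/*
 * Dispatch sketch (illustrative): lpfc_sli_hbq_to_firmware() above simply
 * indirects through phba->lpfc_sli_hbq_to_firmware, which setup code
 * points at the _s3 (host bus queue) or _s4 (receive queue) variant,
 * roughly:
 *
 *	phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
 *
 * so callers stay SLI-revision agnostic.
 */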
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
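/*
 * Worked example of the tag encoding used above: with hbqno = 1 and
 * buffer_count = 5, the tag is (5 | (1 << 16)) = 0x00010005, so later
 * lookups can recover the owning queue with (tag >> 16) and match the
 * full 32-bit value against hbq_buf->tag.
 */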
/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count);
}
/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count);
}
/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the hbq buffer list to remove from.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
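/*
 * The container_of() above relies on struct hbq_dmabuf embedding its
 * dbuf member; a reduced sketch of the pattern:
 *
 *	struct hbq_dmabuf {
 *		struct lpfc_dmabuf dbuf;	(embedded object on the list)
 *		...
 *	};
 *	hbq = container_of(d_buf, struct hbq_dmabuf, dbuf);
 *
 * Subtracting the member offset from the lpfc_dmabuf pointer recovers
 * the enclosing hbq_dmabuf without a separate lookup.
 */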
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag
 * in the hbq buffer list, taking the hbalock while it walks the list. If
 * it finds the hbq buffer, it returns the hbq_buffer; otherwise it
 * returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. It gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}
/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_WRITE_WWN:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_REG_FCFI:
	case MBX_UNREG_FCFI:
	case MBX_REG_VFI:
	case MBX_UNREG_VFI:
	case MBX_INIT_VPI:
	case MBX_INIT_VFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
	case MBX_AUTH_PORT:
	case MBX_ACCESS_VDATA:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by context1
 * of the mailbox.
 **/
void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
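/*
 * Sketch of the waiter side this handler pairs with (condensed and
 * illustrative only; the real flow lives in lpfc_sli_issue_mbox_wait):
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
 *	pmboxq->context1 = &done_q;
 *	lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	wait_event_interruptible_timeout(done_q,
 *		pmboxq->mbox_flag & LPFC_MBX_WAKE, timeout);
 *
 * Reading context1 under the hbalock above closes the race with a
 * waiter that timed out and cleared context1.
 */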
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue a UREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after node is destroyed or node
	 * is in re-discovery driver need to cleanup the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not been done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the unreg rpi mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. An additional reference is put on the ndlp to prevent
 * lpfc_nlp_release from freeing the rpi bit in the bitmask before
 * the unreg mailbox command completes; this routine puts the
 * reference back.
 **/
void
lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = pmb->context1;
	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
		if (phba->sli_rev == LPFC_SLI_REV4 &&
		    (bf_get(lpfc_sli_intf_if_type,
		     &phba->sli4_hba.sli_intf) ==
		     LPFC_SLI_INTF_IF_TYPE_2)) {
			if (ndlp) {
				lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
						 "0010 UNREG_LOGIN vpi:%x "
						 "rpi:%x DID:%x map:%x %p\n",
						 vport->vpi, ndlp->nlp_rpi,
						 ndlp->nlp_DID,
						 ndlp->nlp_usg_map, ndlp);
				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
				lpfc_nlp_put(ndlp);
			}
		}
	}

	mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to upper layers. The interrupt
 * service routine processes the mailbox completion interrupt, adds completed
 * mailbox commands to the mboxq_cmpl queue, and signals the worker thread.
 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
 * function returns the mailbox commands to the upper layer by calling the
 * completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x/x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"(%d):0305 Mbox cmd cmpl "
					"error - RETRYing Data: x%x "
					"(x%x/x%x) x%x x%x x%x\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									pmb),
					lpfc_sli_config_mbox_opcode_get(phba,
									pmb),
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
				"x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, pmb),
				lpfc_sli_config_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7],
				pmbox->un.varWords[8],
				pmbox->un.varWords[9],
				pmbox->un.varWords[10]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}
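/*
 * The splice-then-drain pattern above keeps hbalock hold times short: the
 * ISR appends completions to mboxq_cmpl, and the worker empties the shared
 * list in a single locked operation before iterating lock-free. Reduced
 * sketch of the idiom:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&shared_list, &local_list);
 *	spin_unlock_irq(&phba->hbalock);
 *	(iterate local_list without holding the lock)
 */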
/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer is posted for a particular exchange and
 * the function returns the buffer without replacing the buffer.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}
/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		return 1;
	}
	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
						(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}

		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba,
						pring,
						irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba,
						pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
			    saveq->iocb.unsli3.rcvsli3.ox_id) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. This function is called with the hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
		}
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
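/*
 * Illustration of the lookup-table contract assumed above: iotags are
 * allocated densely from 1..last_iotag and index straight into
 * phba->sli.iocbq_lookup, so resolving a response costs O(1):
 *
 *	if (iotag != 0 && iotag <= phba->sli.last_iotag)
 *		cmd_iocb = phba->sli.iocbq_lookup[iotag];
 *
 * An out-of-range iotag therefore indicates a corrupted or stale
 * response entry, which is why it is logged at KERN_ERR.
 */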
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag. This function is called with the
 * hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ) {
			/* remove from txcmpl queue list */
			list_del_init(&cmd_iocb->list);
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ;
			return cmd_iocb;
		}
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x is out of range: max iotag (x%x)\n",
			iotag, phba->sli.last_iotag);
	return NULL;
}
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			    (pring->ringno == LPFC_ELS_RING) &&
			    (cmdiocbp->iocb.ulpCommand ==
			     CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
							    cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
				     LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
					"Data: x%x x%x x%x x%x\n",
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}

	return rc;
}
/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when the
 * put pointer is ahead of the get pointer for a ring. This function signals
 * an error attention condition to the worker thread and the worker
 * thread will transition the HBA to offline state.
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->sli.sli3.numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);

	return;
}
/**
 * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @ptr: Pointer to address of HBA context object.
 *
 * This function is invoked by the Error Attention polling timer when the
 * timer times out. It will check the SLI Error Attention register for
 * possible attention events. If so, it will post an Error Attention event
 * and wake up the worker thread to process it. Otherwise, it will set up the
 * Error Attention polling timer for the next poll.
 **/
void lpfc_poll_eratt(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;
	uint64_t sli_intr, cnt;

	phba = (struct lpfc_hba *)ptr;

	/* Here we will also keep track of interrupts per sec of the hba */
	sli_intr = phba->sli.slistat.sli_intr;

	if (phba->sli.slistat.sli_prev_intr > sli_intr)
		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
			sli_intr);
	else
		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);

	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
	do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
	phba->sli.slistat.sli_ips = cnt;

	phba->sli.slistat.sli_prev_intr = sli_intr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));
	return;
}
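/*
 * Worked example of the interrupts-per-second math above, assuming the
 * poll interval constant is in seconds (e.g. 5): consecutive samples of
 * 1000 and 6000 give cnt = 5000, and do_div(cnt, 5) leaves cnt = 1000
 * interrupts/sec. do_div() is used because a plain 64-bit '/' is not
 * available on 32-bit x86; the first branch above handles the counter
 * wrapping between polls.
 */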
/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the interrupt context when there is a ring
 * event for the fcp ring. The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
 * LE bit set. The function will call the completion handler of the command
 * iocb if the response iocb indicates a completion for a command iocb or it
 * is an abort completion. The function will call lpfc_sli_process_unsol_iocb
 * function if this is an unsolicited iocb.
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 **/
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;

	rmb();
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
			     IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if (unlikely(!cmdiocbq))
				break;
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->iocb_cmpl) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->sli.sli3.rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
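/*
 * Sketch of the circular-index arithmetic the handler above depends on
 * (put index written by the adapter, get index advanced by the host):
 *
 *	while (rspidx != portRspPut) {
 *		entry = ring_base + rspidx;
 *		if (++rspidx >= portRspMax)
 *			rspidx = 0;
 *		(process entry)
 *	}
 *
 * Equal indices mean the ring is empty, which is why portRspPut is
 * re-read from pgp->rspPutInx once the host catches up to it.
 */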
/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine.  Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del_init(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine wraps the actual slow_ring event process routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
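/*
 * The jump-table indirection above is presumably bound once at attach
 * time by the driver's SLI API setup; a hedged sketch of the idea:
 *
 *	if (phba->sli_rev < LPFC_SLI_REV4)
 *		phba->lpfc_sli_handle_slow_ring_event =
 *			lpfc_sli_handle_slow_ring_event_s3;
 *	else
 *		phba->lpfc_sli_handle_slow_ring_event =
 *			lpfc_sli_handle_slow_ring_event_s4;
 *
 * Callers then stay SLI-revision agnostic.
 */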
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a ring event
 * for non-fcp rings. The caller does not hold any lock. The function will
 * remove each response iocb in the response ring and call the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->sli.sli3.numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	rmb();
	while (pring->sli.sli3.rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->sli.sli3.rspidx >= portRspMax)
			pring->sli.sli3.rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring: wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		writel(pring->sli.sli3.rspidx,
		       &phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->sli.sli3.rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->sli.sli3.rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function will remove each
 * response iocb from the response worker queue and call the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			break;
		default:
			break;
		}
	}
}
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		spin_lock_irq(&pring->ring_lock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);

		spin_lock_irq(&phba->hbalock);
		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	} else {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		spin_unlock_irq(&phba->hbalock);
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
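/*
 * Design note on the split above: txq entries were never handed to the
 * hardware, so they can be spliced to a private list and completed with
 * IOSTAT_LOCAL_REJECT once the lock is dropped; txcmplq entries are owned
 * by the adapter, so each one instead gets an ABTS via
 * lpfc_sli_issue_abort_iotag() and is reclaimed when the abort completes.
 * Reduced sketch of the splice idiom:
 *
 *	LIST_HEAD(completions);
 *	spin_lock_irq(lock);
 *	list_splice_init(&pring->txq, &completions);
 *	spin_unlock_irq(lock);
 *	lpfc_sli_cancel_iocbs(phba, &completions, status, reason);
 */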
/**
 * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
 * @phba: Pointer to HBA context object.
 *
 * This function aborts all iocbs in FCP rings and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t i;

	/* Look on all the FCP Rings for the iotag */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
			lpfc_sli_abort_iocb_ring(phba, pring);
		}
	} else {
		pring = &psli->ring[psli->fcp_ring];
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}
/**
 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all iocbs in the fcp ring and frees all the iocb
 * objects in txq and txcmplq. This function will not issue abort iocbs
 * for all the iocb commands in txcmplq; they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when the device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	uint32_t i;

	spin_lock_irq(&phba->hbalock);
	/* Indicate the I/O queues are flushed */
	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
	spin_unlock_irq(&phba->hbalock);

	/* Look on all the FCP Rings for the iotag */
	if (phba->sli_rev >= LPFC_SLI_REV4) {
		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];

			spin_lock_irq(&pring->ring_lock);
			/* Retrieve everything on txq */
			list_splice_init(&pring->txq, &txq);
			/* Retrieve everything on the txcmplq */
			list_splice_init(&pring->txcmplq, &txcmplq);
			pring->txq_cnt = 0;
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&pring->ring_lock);

			/* Flush the txq */
			lpfc_sli_cancel_iocbs(phba, &txq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
			/* Flush the txcmpq */
			lpfc_sli_cancel_iocbs(phba, &txcmplq,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_DOWN);
		}
	} else {
		pring = &psli->ring[psli->fcp_ring];

		spin_lock_irq(&phba->hbalock);
		/* Retrieve everything on txq */
		list_splice_init(&pring->txq, &txq);
		/* Retrieve everything on the txcmplq */
		list_splice_init(&pring->txcmplq, &txcmplq);
		pring->txq_cnt = 0;
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		/* Flush the txq */
		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
		/* Flush the txcmpq */
		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
				      IOERR_SLI_DOWN);
	}
}
/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares it
 * with the provided bit mask to check if the HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete the restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when the HBA fails to restart, otherwise it
 * returns zero.
 **/
static int
lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int i = 0;
	int retval = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return 1;

	/*
	 * Check status register every 100ms for 5 retries, then every
	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
	 * every 2.5 sec for 4.
	 * Break out of the loop if errors occurred during init.
	 */
	while (((status & mask) != mask) &&
	       !(status & HS_FFERM) &&
	       i++ < 20) {

		if (i <= 5)
			msleep(10);
		else if (i <= 10)
			msleep(500);
		else
			msleep(2500);

		if (i == 15) {
				/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status)) {
			retval = 1;
			break;
		}
	}

	/* Check to see if any errors occurred during init */
	if ((status & HS_FFERM) || (i >= 20)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2751 Adapter failed to restart, "
				"status reg x%x, FW Data: A8 x%x AC x%x\n",
				status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	}

	return retval;
}
/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function checks the host status register to check if the HBA is
 * ready. This function will wait in a loop for the HBA to become ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when the HBA fails to become
 * ready, otherwise it returns zero.
 **/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = lpfc_sli4_post_status_check(phba);

	if (status) {
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		status = lpfc_sli4_post_status_check(phba);
	}

	/* Check to see if any errors occurred during init */
	if (status) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	} else
		phba->sli4_hba.intr_enable = 0;

	return retval;
}
/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
 * from the API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	return phba->lpfc_sli_brdready(phba, mask);
}

#define BARRIER_TEST_PATTERN (0xdeadbeef)
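/*
 * The barrier handshake below writes BARRIER_TEST_PATTERN (0xdeadbeef)
 * into SLIM and waits for the firmware to echo back its bitwise
 * complement; as a worked value, ~0xdeadbeef == 0x21524110, so seeing
 * that value at resp_buf + 1 confirms the other side processed the
 * mailbox and has quiesced its DMA activity.
 */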
/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting an HBA. This function is called
 * with hbalock held and requests HBA to quiesce DMAs before a reset.
 */
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy, ha_copy, resp_data;
	int  i;
	uint8_t hdrtype;

	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA work.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	if (lpfc_readl(phba->HCregaddr, &hc_copy))
		return;
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return;
	if (ha_copy & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	for (i = 0; i < 50; i++) {
		if (lpfc_readl((resp_buf + 1), &resp_data))
			return;
		if (resp_data != ~(BARRIER_TEST_PATTERN))
			mdelay(1);
		else
			break;
	}
	resp_data = 0;
	if (lpfc_readl((resp_buf + 1), &resp_data))
		return;
	if (resp_data != ~(BARRIER_TEST_PATTERN)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	resp_data = 0;
	for (i = 0; i < 500; i++) {
		if (lpfc_readl(resp_buf, &resp_data))
			return;
		if (resp_data != mbox)
			mdelay(1);
		else
			break;
	}

clear_errat:

	while (++i < 500) {
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return;
		if (!(ha_copy & HA_ERATT))
			mdelay(1);
		else
			break;
	}

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
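
/*
 * Summary sketch of the handshake above (illustrative): the host parks
 * BARRIER_TEST_PATTERN in the second SLIM word and posts a chip-owned
 * MBX_KILL_BOARD mailbox; the adapter signals that DMA is quiesced by
 * writing back the one's complement of the pattern. Callers hold the
 * hbalock across the barrier and the reset that follows, as the SLI-3
 * restart path does:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	lpfc_reset_barrier(phba);
 *	lpfc_sli_brdreset(phba);
 *	spin_unlock_irq(&phba->hbalock);
 */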
/**
 * lpfc_sli_brdkill - Issue a kill_board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * This function issues a kill_board mailbox command and waits for
 * the error attention interrupt. This function is called for stopping
 * the firmware processing. The caller is not required to hold any
 * locks. This function calls lpfc_hba_down_post function to free
 * any pending commands after the kill. The function returns 1 if it
 * fails to kill the board, else it returns 0.
 */
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0329 Kill HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return 1;

	/* Disable the error attention */
	spin_lock_irq(&phba->hbalock);
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		mempool_free(pmb, phba->mbox_mem_pool);
		return 1;
	}
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2752 KILL_BOARD command failed retval %d\n",
				retval);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
	 * attention every 100ms for 3 seconds. If we don't get ERATT after
	 * 3 seconds we still set HBA_ERROR state because the status of the
	 * board is now undefined.
	 */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;
	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return 1;
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	return ha_copy & HA_ERATT ? 0 : 1;
}
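
/*
 * Caller sketch (illustrative): lpfc_sli_brdkill() requires no locks
 * and returns 0 only once the error attention has fired, so a recovery
 * path might simply do:
 *
 *	if (lpfc_sli_brdkill(phba))
 *		return -EIO;	(the board state is now undefined)
 */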
/**
 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets the HBA by writing HC_INITFF to the control
 * register. After the HBA resets, this function resets all the iocb ring
 * indices. This function disables PCI layer parity checking during
 * the reset.
 * This function returns 0 always.
 * The caller is not required to hold any locks.
 */
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag = 0;
		pring->sli.sli3.rspidx = 0;
		pring->sli.sli3.next_cmdidx  = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
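
/*
 * The PCI_COMMAND save/mask/restore around the INITFF toggle is a
 * generic pattern for resets that can glitch the bus; a minimal
 * standalone sketch (assuming only a valid struct pci_dev *pdev):
 *
 *	uint16_t cmd;
 *
 *	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *	pci_write_config_word(pdev, PCI_COMMAND,
 *			      cmd & ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR));
 *	... perform the reset ...
 *	pci_write_config_word(pdev, PCI_COMMAND, cmd);
 */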
/**
 * lpfc_sli4_brdreset - Reset a sli-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets a SLI4 HBA. It disables PCI layer parity
 * checking while resetting the device. The caller is not required to
 * hold any locks.
 * This function returns 0 always.
 */
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint16_t cfg_value;
	int rc = 0;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0295 Reset HBA Data: x%x x%x x%x\n",
			phba->pport->port_state, psli->sli_flag,
			phba->hba_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~(LPFC_PROCESS_LA);
	phba->fcf.fcf_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	/* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
	if (phba->hba_flag & HBA_FW_DUMP_OP) {
		phba->hba_flag &= ~HBA_FW_DUMP_OP;
		return rc;
	}

	/* Now physically reset the device */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0389 Performing PCI function reset!\n");

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	/* Perform FCoE PCI function reset before freeing queue memory */
	rc = lpfc_pci_function_reset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	return rc;
}
/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to
 * restart the HBA. The caller is not required to hold any lock.
 * This function writes MBX_RESTART mailbox command to the SLIM and
 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
 * function to free any pending commands. The function enables
 * POST only during the first initialization. The function returns zero.
 * The function does not guarantee completion of MBX_RESTART mailbox
 * command before the return of this function.
 */
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	volatile uint32_t word0;
	void __iomem *to_slim;
	uint32_t hba_aer_enabled;

	spin_lock_irq(&phba->hbalock);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0337 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (phba->pport->port_state)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Give the INITFF and Post time to settle. */
	mdelay(100);

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}
/**
 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * a SLI4 HBA. The caller is not required to hold any lock.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 */
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;
	int rc;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	rc = lpfc_sli4_brdreset(phba);

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return rc;
}
/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
 * API jump table function pointer from the lpfc_hba struct.
 */
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}
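
/*
 * Design note: the SLI-3/SLI-4 split is resolved once through per-hba
 * function pointers instead of run-time sli_rev checks. A sketch of how
 * the jump table is presumably wired at API-table setup time (the setup
 * site lives elsewhere in the driver):
 *
 *	case LPFC_PCI_DEV_LP:	(SLI-3 device)
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
 *		break;
 *	case LPFC_PCI_DEV_OC:	(SLI-4 device)
 *		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
 *		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
 *		break;
 */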
/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after a HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
 * iterations, the function will restart the HBA again. The function
 * returns zero if the HBA successfully restarted, else it returns a
 * negative error code.
 */
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries for a total of
		 * ~60 seconds before resetting the board again, then check
		 * every 1 sec for 50 more retries. Up to 60 seconds before
		 * board ready is required for the Falcon FIPS zeroization
		 * to complete; any board reset in between restarts the
		 * zeroization and further delays board readiness.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
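
/*
 * Timing sketch for the poll loop above (derived from the cadence in
 * its comment): 10 polls at 10ms plus 90 polls at 100ms is roughly 9s;
 * iterations beyond 100 poll at 1s each, so the board restart at
 * iteration 150 lands roughly a minute in, leaving about 50 more 1s
 * polls before the 200-iteration budget returns -ETIMEDOUT.
 */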
/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * This function calculates and returns the number of HBQs required to be
 * configured.
 */
int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}

/**
 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
 *
 * This function adds the number of hbq entries in every HBQ to get
 * the total number of hbq entries required for the HBA and returns
 * the total count.
 */
static int
lpfc_sli_hbq_entry_count(void)
{
	int  hbq_count = lpfc_sli_hbq_count();
	int  count = 0;
	int  i;

	for (i = 0; i < hbq_count; ++i)
		count += lpfc_hbq_defs[i]->entry_count;
	return count;
}

/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * This function calculates amount of memory required for all hbq entries
 * to be configured and returns the total memory required.
 */
int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
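
/*
 * Worked example (illustrative): if lpfc_hbq_defs[] held two HBQs of,
 * say, 256 and 128 entries, lpfc_sli_hbq_count() would return 2,
 * lpfc_sli_hbq_entry_count() 384, and lpfc_sli_hbq_size()
 * 384 * sizeof(struct lpfc_hbq_entry) bytes.
 */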
/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 */
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int  hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx      = 0;
		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
			hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}
/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 */
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
	phba->hbq_in_use = 1;
	phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
	phba->hbq_count = 1;
	/* Initially populate or replenish the HBQs */
	lpfc_sli_hbqbuf_init_hbqs(phba, 0);
	return 0;
}
/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by sli_mode
 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else returns negative error
 * code.
 */
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;

		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_BG_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;

			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3110 Port did not grant ASABT\n");
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;
		} else
			phba->max_vpi = 0;
		phba->fips_level = 0;
		phba->fips_spec_rev = 0;
		if (pmb->u.mb.un.varCfgPort.gdss) {
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2850 Security Crypto Active. FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->cfg_enable_bg) {
			if (pmb->u.mb.un.varCfgPort.gbg)
				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
			else
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
		}
	} else {
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
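
/*
 * Caller sketch (illustrative): lpfc_sli_hba_setup() below drives this
 * routine with a mode fallback, roughly:
 *
 *	rc = lpfc_sli_config_port(phba, 3);
 *	if (rc)
 *		rc = lpfc_sli_config_port(phba, 2);	(retry in SLI-2 mode)
 */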
/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization function. This function
 * is called by the HBA initialization code, HBA reset code and HBA
 * error attention handler code. Caller is not required to hold any
 * locks. This function issues config_port mailbox command to configure
 * the SLI, setup iocb rings and HBQ rings. In the end the function
 * calls the config_port_post function to issue init_link mailbox
 * command and to start the discovery. The function will return zero
 * if successful, else it will return negative error code.
 */
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int  mode = 3, i;
	int  longs;

	switch (lpfc_sli_mode) {
	case 2:
		if (phba->cfg_enable_npiv) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1824 NPIV enabled: Override lpfc_sli_mode "
				"parameter (%d) to auto (0).\n",
				lpfc_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1819 Unrecognized lpfc_sli_mode "
				"parameter: %d.\n", lpfc_sli_mode);
		break;
	}

	rc = lpfc_sli_config_port(phba, mode);

	if (rc && lpfc_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1820 Unable to select SLI-3. "
				"Not supported by adapter.\n");
	if (rc && mode != 2)
		rc = lpfc_sli_config_port(phba, 2);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2709 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2708 This device does not support "
					"Advanced Error Reporting (AER): %d\n",
					rc);
			phba->cfg_aer_support = 0;
		}
	}

	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Initialize VPIs. */
	if (phba->sli_rev == LPFC_SLI_REV3) {
		/*
		 * The VPI bitmask and physical ID array are allocated
		 * and initialized once only - at driver load. A port
		 * reset doesn't need to reinitialize this memory.
		 */
		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
			phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long),
						  GFP_KERNEL);
			if (!phba->vpi_bmask) {
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}

			phba->vpi_ids = kzalloc(
					(phba->max_vpi+1) * sizeof(uint16_t),
					GFP_KERNEL);
			if (!phba->vpi_ids) {
				kfree(phba->vpi_bmask);
				rc = -ENOMEM;
				goto lpfc_sli_hba_setup_error;
			}
			for (i = 0; i < phba->max_vpi; i++)
				phba->vpi_ids[i] = i;
		}
	}

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0445 Firmware initialization failed\n");
	return rc;
}
/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 *
 * This function issues a dump mailbox command to read config region
 * 23, parses the records in the region, and populates the driver's
 * FCoE parameters.
 */
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	mqe = &mboxq->u.mqe;
	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
		rc = -ENOMEM;
		goto out_free_mboxq;
	}

	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		rc = -EIO;
		goto out_free_mboxq;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	rc = 0;

out_free_mboxq:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *	      On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command. In
 * addition, this routine gets the port vpd data.
 *
 * Return codes:
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 */
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		    uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Get a DMA buffer for the vpd data resulting from the READ_REV
	 * mailbox command.
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * The SLI4 implementation of READ_REV conflicts at word1,
	 * bits 31:16 and SLI4 adds vpd functionality not present
	 * in SLI3. This code corrects the conflicts.
	 */
	lpfc_read_rev(phba, mboxq);
	mqe = &mboxq->u.mqe;
	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
	mqe->un.read_rev.word1 &= 0x0000FFFF;
	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -EIO;
	}

	/*
	 * The available vpd length cannot be bigger than the
	 * DMA buffer passed to the port. Catch the less than
	 * case and update the caller's size.
	 */
	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
		*vpd_size = mqe->un.read_rev.avail_vpd_len;

	memcpy(vpd, dmabuf->virt, *vpd_size);

	dma_free_coherent(&phba->pcidev->dev, dma_size,
			  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return 0;
}
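
/*
 * Usage sketch (illustrative): the caller owns the vpd buffer and learns
 * the valid length on return; 1024 below is just an example size:
 *
 *	uint32_t vpd_size = 1024;
 *	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
 *
 *	if (vpd && !lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size))
 *		... vpd[0..vpd_size-1] now holds the port vpd data ...
 */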
/**
 * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine retrieves the SLI4 device physical port name this PCI
 * function is hosted on.
 *
 * Return codes:
 *	0 - success
 *	otherwise - failed to retrieve physical port name
 */
static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
	struct lpfc_controller_attribute *cntl_attr;
	struct lpfc_mbx_get_port_name *get_port_name;
	void *virtaddr = NULL;
	uint32_t alloclen, reqlen;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	char cport_name = 0;
	int rc;

	/* We assume nothing at this point */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;

	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	/* obtain link type and link number via READ_CONFIG */
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
	lpfc_sli4_read_config(phba);
	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
		goto retrieve_ppname;

	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
			LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3084 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory size "
				"(%d)\n", alloclen, reqlen);
		rc = -ENOMEM;
		goto out_free_mboxq;
	}
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	virtaddr = mboxq->sge_array->addr[0];
	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
	shdr = &mbx_cntl_attr->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3085 Mailbox x%x (x%x/x%x) failed, "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	cntl_attr = &mbx_cntl_attr->cntl_attr;
	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
	phba->sli4_hba.lnk_info.lnk_tp =
		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
	phba->sli4_hba.lnk_info.lnk_no =
		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3086 lnk_type:%d, lnk_numb:%d\n",
			phba->sli4_hba.lnk_info.lnk_tp,
			phba->sli4_hba.lnk_info.lnk_no);

retrieve_ppname:
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
		LPFC_MBOX_OPCODE_GET_PORT_NAME,
		sizeof(struct lpfc_mbx_get_port_name) -
		sizeof(struct lpfc_sli4_cfg_mhdr),
		LPFC_SLI4_MBX_EMBED);
	get_port_name = &mboxq->u.mqe.un.get_port_name;
	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
		phba->sli4_hba.lnk_info.lnk_tp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3087 Mailbox x%x (x%x/x%x) failed: "
				"rc:x%x, status:x%x, add_status:x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				rc, shdr_status, shdr_add_status);
		rc = -ENXIO;
		goto out_free_mboxq;
	}
	switch (phba->sli4_hba.lnk_info.lnk_no) {
	case LPFC_LINK_NUMBER_0:
		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_1:
		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_2:
		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	case LPFC_LINK_NUMBER_3:
		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
				&get_port_name->u.response);
		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
		break;
	default:
		break;
	}

	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
		phba->Port[0] = cport_name;
		phba->Port[1] = '\0';
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3091 SLI get port name: %s\n", phba->Port);
	}

out_free_mboxq:
	if (rc != MBX_TIMEOUT) {
		if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		else
			mempool_free(mboxq, phba->mbox_mem_pool);
	}
	return rc;
}
/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 */
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int fcp_eqidx;

	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
	fcp_eqidx = 0;
	if (phba->sli4_hba.fcp_cq) {
		do {
			lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
					     LPFC_QUEUE_REARM);
		} while (++fcp_eqidx < phba->cfg_fcp_io_channel);
	}

	if (phba->cfg_fof)
		lpfc_sli4_cq_release(phba->sli4_hba.oas_cq, LPFC_QUEUE_REARM);

	if (phba->sli4_hba.hba_eq) {
		for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
		     fcp_eqidx++)
			lpfc_sli4_eq_release(phba->sli4_hba.hba_eq[fcp_eqidx],
					     LPFC_QUEUE_REARM);
	}

	if (phba->cfg_fof)
		lpfc_sli4_eq_release(phba->sli4_hba.fof_eq, LPFC_QUEUE_REARM);
}
/**
 * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_count: buffer to hold port available extent count.
 * @extnt_size: buffer to hold element count per extent.
 *
 * This function calls the port and retrieves the number of available
 * extents and their size for a particular extent type.
 *
 * Returns: 0 if successful. Nonzero otherwise.
 */
static int
lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_count, uint16_t *extnt_size)
{
	int rc = 0;
	uint32_t length;
	uint32_t mbox_tmo;
	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
	LPFC_MBOXQ_t *mbox;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Find out how many extents are available for this resource type */
	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the GET doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
	if (bf_get(lpfc_mbox_hdr_status,
		   &rsrc_info->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2930 Failed to get resource extents "
				"Status 0x%x Add'l Status 0x%x\n",
				bf_get(lpfc_mbox_hdr_status,
				       &rsrc_info->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				       &rsrc_info->header.cfg_shdr.response));
		rc = -EIO;
		goto err_exit;
	}

	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
			      &rsrc_info->u.rsp);
	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
			     &rsrc_info->u.rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3162 Retrieved extents type-%d from port: count:%d, "
			"size:%d\n", type, *extnt_count, *extnt_size);

err_exit:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
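
/*
 * The issue-or-wait idiom above recurs throughout these extent helpers;
 * a sketch of it in isolation (mbox being a prepared LPFC_MBOXQ_t):
 *
 *	if (!phba->sli4_hba.intr_enable)
 *		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	else {
 *		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
 *		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
 *	}
 *
 * Early in initialization interrupts are off, so the command is polled;
 * afterwards the driver sleeps on completion with a per-command timeout.
 */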
/**
 * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 * @type: The extent type to check.
 *
 * This function reads the current available extents from the port and checks
 * if the extent count or extent size has changed since the last access.
 * Callers use this routine post port reset to understand if there is an
 * extent reprovisioning requirement.
 *
 * Returns:
 *   -Error: error indicates problem.
 *   1: Extent count or size has changed.
 *   0: No changes.
 */
static int
lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
{
	uint16_t curr_ext_cnt, rsrc_ext_cnt;
	uint16_t size_diff, rsrc_ext_size;
	int rc = 0;
	struct lpfc_rsrc_blks *rsrc_entry;
	struct list_head *rsrc_blk_list = NULL;

	size_diff = 0;
	curr_ext_cnt = 0;
	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_ext_cnt,
					    &rsrc_ext_size);
	if (unlikely(rc))
		return -EIO;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		break;
	}

	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
		curr_ext_cnt++;
		if (rsrc_entry->rsrc_size != rsrc_ext_size)
			size_diff++;
	}

	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
		rc = 1;

	return rc;
}
/**
 * lpfc_sli4_cfg_post_extnts -
 * @phba: Pointer to HBA context object.
 * @extnt_cnt: number of available extents.
 * @type: the extent type (rpi, xri, vfi, vpi).
 * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
 * @mbox: pointer to the caller's allocated mailbox structure.
 *
 * This function executes the extents allocation request. It also
 * takes care of the amount of memory needed to allocate or get the
 * allocated extents. It is the caller's responsibility to evaluate
 * the response.
 *
 * Returns:
 *   -Error: Error value describes the condition found.
 *   0: if successful
 */
static int
lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
{
	int rc = 0;
	uint32_t req_len;
	uint32_t emb_len;
	uint32_t alloc_len, mbox_tmo;

	/* Calculate the total requested length of the dma memory */
	req_len = extnt_cnt * sizeof(uint16_t);

	/*
	 * Calculate the size of an embedded mailbox. The uint32_t
	 * accounts for extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox. If not true, reconfigure to a non-embedded mailbox.
	 */
	*emb = LPFC_SLI4_MBX_EMBED;
	if (req_len > emb_len) {
		req_len = extnt_cnt * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		*emb = LPFC_SLI4_MBX_NEMBED;
	}

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
				     req_len, *emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2982 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
	if (unlikely(rc))
		return -EIO;

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc))
		rc = -EIO;
	return rc;
}
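
/*
 * Sizing example (illustrative): extent ids are uint16_t, so a request
 * for, say, 64 extents needs 128 bytes of payload. If that fits within
 * emb_len (the mailbox body minus its header minus the extents-specific
 * word) the command stays embedded; otherwise req_len is regrown to add
 * the config shdr and extra word, and the command goes non-embedded.
 */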
/**
 * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type to allocate.
 *
 * This function allocates the number of elements for the specified
 * resource type.
 */
static int
lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	bool emb = false;
	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
	uint16_t rsrc_id, rsrc_start, j, k;
	uint16_t *ids;
	int i, rc;
	unsigned long longs;
	unsigned long *bmask;
	struct lpfc_rsrc_blks *rsrc_blks;
	LPFC_MBOXQ_t *mbox;
	uint32_t length;
	struct lpfc_id_range *id_array = NULL;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	struct list_head *ext_blk_list;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
					    &rsrc_cnt,
					    &rsrc_size);
	if (unlikely(rc))
		return -EIO;

	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"3009 No available Resource Extents "
			"for resource type 0x%x: Count: 0x%x, "
			"Size 0x%x\n", type, rsrc_cnt,
			rsrc_size);
		return -ENOMEM;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
			"2903 Post resource extents type-0x%x: "
			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located. Then get local pointers
	 * to the response data. The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		id_array = &rsrc_ext->u.rsp.id[0];
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
		id_array = &n_rsrc->id;
	}

	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
	rsrc_id_cnt = rsrc_cnt * rsrc_size;

	/*
	 * Based on the resource size and count, correct the base and max
	 * resource values.
	 */
	length = sizeof(struct lpfc_rsrc_blks);
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_RPI:
		phba->sli4_hba.rpi_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kzalloc(rsrc_id_cnt *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			kfree(phba->sli4_hba.rpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/*
		 * The next_rpi was initialized with the maximum available
		 * count but the port may allocate a smaller number. Catch
		 * that case and update the next_rpi.
		 */
		phba->sli4_hba.next_rpi = rsrc_id_cnt;

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.rpi_bmask;
		ids = phba->sli4_hba.rpi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VPI:
		phba->vpi_bmask = kzalloc(longs *
					  sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->vpi_ids = kzalloc(rsrc_id_cnt *
					sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			kfree(phba->vpi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->vpi_bmask;
		ids = phba->vpi_ids;
		ext_blk_list = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		phba->sli4_hba.xri_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kzalloc(rsrc_id_cnt *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			kfree(phba->sli4_hba.xri_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.xri_bmask;
		ids = phba->sli4_hba.xri_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		phba->sli4_hba.vfi_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.vfi_ids = kzalloc(rsrc_id_cnt *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			kfree(phba->sli4_hba.vfi_bmask);
			rc = -ENOMEM;
			goto err_exit;
		}

		/* Initialize local ptrs for common extent processing later. */
		bmask = phba->sli4_hba.vfi_bmask;
		ids = phba->sli4_hba.vfi_ids;
		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	default:
		/* Unsupported Opcode. Fail call. */
		id_array = NULL;
		bmask = NULL;
		ids = NULL;
		ext_blk_list = NULL;
		goto err_exit;
	}

	/*
	 * Complete initializing the extent configuration with the
	 * allocated ids assigned to this function. The bitmask serves
	 * as an index into the array and manages the available ids. The
	 * array just stores the ids communicated to the port via the wqes.
	 */
	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
		if ((i % 2) == 0)
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
					 &id_array[k]);
		else
			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
					 &id_array[k]);

		rsrc_blks = kzalloc(length, GFP_KERNEL);
		if (unlikely(!rsrc_blks)) {
			rc = -ENOMEM;
			kfree(bmask);
			kfree(ids);
			goto err_exit;
		}
		rsrc_blks->rsrc_start = rsrc_id;
		rsrc_blks->rsrc_size = rsrc_size;
		list_add_tail(&rsrc_blks->list, ext_blk_list);
		rsrc_start = rsrc_id;
		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0))
			phba->sli4_hba.scsi_xri_start = rsrc_start +
				lpfc_sli4_get_els_iocb_cnt(phba);

		while (rsrc_id < (rsrc_start + rsrc_size)) {
			ids[j] = rsrc_id;
			rsrc_id++;
			j++;
		}
		/* Entire word processed. Get next word.*/
		if ((i % 2) == 1)
			k++;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
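
/*
 * Allocation sketch (illustrative): with bmask/ids populated above, a
 * later id allocator is essentially a find-and-set into the bitmask
 * that indexes the id array, roughly:
 *
 *	idx = find_first_zero_bit(bmask, rsrc_id_cnt);
 *	if (idx < rsrc_id_cnt) {
 *		set_bit(idx, bmask);
 *		rsrc_id = ids[idx];
 *	}
 */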
/**
 * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
 * @phba: Pointer to HBA context object.
 * @type: the extent's type.
 *
 * This function deallocates all extents of a particular resource type.
 * SLI4 does not allow for deallocating a particular extent range. It
 * is the caller's responsibility to release all kernel memory resources.
 */
static int
lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
{
	int rc;
	uint32_t length, mbox_tmo = 0;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/*
	 * This function sends an embedded mailbox because it only sends
	 * the resource type. All extents of this type are released by the
	 * port.
	 */
	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
			 length, LPFC_SLI4_MBX_EMBED);

	/* Send an extents count of 0 - the dealloc doesn't use it. */
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
					LPFC_SLI4_MBX_EMBED);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
	if (bf_get(lpfc_mbox_hdr_status,
		   &dealloc_rsrc->header.cfg_shdr.response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
				"2919 Failed to release resource extents "
				"for type %d - Status 0x%x Add'l Status 0x%x. "
				"Resource memory not released.\n",
				type,
				bf_get(lpfc_mbox_hdr_status,
				    &dealloc_rsrc->header.cfg_shdr.response),
				bf_get(lpfc_mbox_hdr_add_status,
				    &dealloc_rsrc->header.cfg_shdr.response));
		rc = -EIO;
		goto out_free_mbox;
	}

	/* Release kernel memory resources for the specific type. */
	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		kfree(phba->vpi_bmask);
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->lpfc_vpi_blk_list, list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_xri_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_vfi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		/* RPI bitmask and physical id array are cleaned up earlier. */
		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
					 &phba->sli4_hba.lpfc_rpi_blk_list,
					 list) {
			list_del_init(&rsrc_blk->list);
			kfree(rsrc_blk);
		}
		break;
	default:
		break;
	}

	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);

 out_free_mbox:
	mempool_free(mbox, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function allocates all SLI4 resource identifiers.
 */
int
lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
{
	int i, rc, error = 0;
	uint16_t count, base;
	unsigned long longs;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	if (phba->sli4_hba.extents_in_use) {
		/*
		 * The port supports resource extents. The XRI, VPI, VFI, RPI
		 * resource extent count must be read and allocated before
		 * provisioning the resource id arrays.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			/*
			 * Extent-based resources are set - the driver could
			 * be in a port reset. Figure out if any corrective
			 * actions need to be taken.
			 */
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
			if (rc != 0)
				error++;
			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			if (rc != 0)
				error++;

			/*
			 * It's possible that the number of resources
			 * provided to this port instance changed between
			 * resets. Detect this condition and reallocate
			 * resources. Otherwise, there is no action.
			 */
			if (error) {
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_INIT,
						"2931 Detected extent resource "
						"change. Reallocating all "
						"extents.\n");
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VFI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_VPI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_XRI);
				rc = lpfc_sli4_dealloc_extent(phba,
						 LPFC_RSC_TYPE_FCOE_RPI);
			} else
				return 0;
		}

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		if (unlikely(rc))
			goto err_exit;

		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		if (unlikely(rc))
			goto err_exit;
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return rc;
	} else {
		/*
		 * The port does not support resource extents. The XRI, VPI,
		 * VFI, RPI resource ids were determined from READ_CONFIG.
		 * Just allocate the bitmasks and provision the resource id
		 * arrays. If a port reset is active, the resources don't
		 * need any action - just exit.
		 */
		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
		    LPFC_IDX_RSRC_RDY) {
			lpfc_sli4_dealloc_resource_identifiers(phba);
			lpfc_sli4_remove_rpis(phba);
		}
		/* RPIs. */
		count = phba->sli4_hba.max_cfg_param.max_rpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3279 Invalid provisioning of "
					"rpi:%d\n", count);
			rc = -EINVAL;
			goto err_exit;
		}
		base = phba->sli4_hba.max_cfg_param.rpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.rpi_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
			rc = -ENOMEM;
			goto err_exit;
		}
		phba->sli4_hba.rpi_ids = kzalloc(count *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.rpi_ids)) {
			rc = -ENOMEM;
			goto free_rpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.rpi_ids[i] = base + i;

		/* VPIs. */
		count = phba->sli4_hba.max_cfg_param.max_vpi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3280 Invalid provisioning of "
					"vpi:%d\n", count);
			rc = -EINVAL;
			goto free_rpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vpi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->vpi_bmask = kzalloc(longs *
					  sizeof(unsigned long),
					  GFP_KERNEL);
		if (unlikely(!phba->vpi_bmask)) {
			rc = -ENOMEM;
			goto free_rpi_ids;
		}
		phba->vpi_ids = kzalloc(count *
					sizeof(uint16_t),
					GFP_KERNEL);
		if (unlikely(!phba->vpi_ids)) {
			rc = -ENOMEM;
			goto free_vpi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->vpi_ids[i] = base + i;

		/* XRIs. */
		count = phba->sli4_hba.max_cfg_param.max_xri;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3281 Invalid provisioning of "
					"xri:%d\n", count);
			rc = -EINVAL;
			goto free_vpi_ids;
		}
		base = phba->sli4_hba.max_cfg_param.xri_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.xri_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_bmask)) {
			rc = -ENOMEM;
			goto free_vpi_ids;
		}
		phba->sli4_hba.max_cfg_param.xri_used = 0;
		phba->sli4_hba.xri_ids = kzalloc(count *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.xri_ids)) {
			rc = -ENOMEM;
			goto free_xri_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.xri_ids[i] = base + i;

		/* VFIs. */
		count = phba->sli4_hba.max_cfg_param.max_vfi;
		if (count <= 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3282 Invalid provisioning of "
					"vfi:%d\n", count);
			rc = -EINVAL;
			goto free_xri_ids;
		}
		base = phba->sli4_hba.max_cfg_param.vfi_base;
		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		phba->sli4_hba.vfi_bmask = kzalloc(longs *
						   sizeof(unsigned long),
						   GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
			rc = -ENOMEM;
			goto free_xri_ids;
		}
		phba->sli4_hba.vfi_ids = kzalloc(count *
						 sizeof(uint16_t),
						 GFP_KERNEL);
		if (unlikely(!phba->sli4_hba.vfi_ids)) {
			rc = -ENOMEM;
			goto free_vfi_bmask;
		}

		for (i = 0; i < count; i++)
			phba->sli4_hba.vfi_ids[i] = base + i;

		/*
		 * Mark all resources ready. An HBA reset doesn't need
		 * to reset the initialization.
		 */
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
		       LPFC_IDX_RSRC_RDY);
		return 0;
	}

 free_vfi_bmask:
	kfree(phba->sli4_hba.vfi_bmask);
	phba->sli4_hba.vfi_bmask = NULL;
 free_xri_ids:
	kfree(phba->sli4_hba.xri_ids);
	phba->sli4_hba.xri_ids = NULL;
 free_xri_bmask:
	kfree(phba->sli4_hba.xri_bmask);
	phba->sli4_hba.xri_bmask = NULL;
 free_vpi_ids:
	kfree(phba->vpi_ids);
	phba->vpi_ids = NULL;
 free_vpi_bmask:
	kfree(phba->vpi_bmask);
	phba->vpi_bmask = NULL;
 free_rpi_ids:
	kfree(phba->sli4_hba.rpi_ids);
	phba->sli4_hba.rpi_ids = NULL;
 free_rpi_bmask:
	kfree(phba->sli4_hba.rpi_bmask);
	phba->sli4_hba.rpi_bmask = NULL;
 err_exit:
	return rc;
}
/**
 * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
 * @phba: Pointer to HBA context object.
 *
 * This function deallocates all SLI4 resource identifiers for the
 * supported resource types.
 */
int
lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
{
	if (phba->sli4_hba.extents_in_use) {
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
	} else {
		kfree(phba->vpi_bmask);
		phba->sli4_hba.max_cfg_param.vpi_used = 0;
		kfree(phba->vpi_ids);
		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		kfree(phba->sli4_hba.xri_bmask);
		kfree(phba->sli4_hba.xri_ids);
		kfree(phba->sli4_hba.vfi_bmask);
		kfree(phba->sli4_hba.vfi_ids);
		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
	}

	return 0;
}
/**
 * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
 * @phba: Pointer to HBA context object.
 * @type: The resource extent type.
 * @extnt_cnt: buffer to hold port extent count response
 * @extnt_size: buffer to hold port extent size response.
 *
 * This function calls the port to read the host allocated extents
 * for a particular type.
 **/
int
lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
			       uint16_t *extnt_cnt, uint16_t *extnt_size)
{
	bool emb;
	int rc = 0;
	uint16_t curr_blks = 0;
	uint32_t req_len, emb_len;
	uint32_t alloc_len, mbox_tmo;
	struct list_head *blk_list_head;
	struct lpfc_rsrc_blks *rsrc_blk;
	LPFC_MBOXQ_t *mbox;
	void *virtaddr = NULL;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
	union lpfc_sli4_cfg_shdr *shdr;

	switch (type) {
	case LPFC_RSC_TYPE_FCOE_VPI:
		blk_list_head = &phba->lpfc_vpi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_XRI:
		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_VFI:
		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
		break;
	case LPFC_RSC_TYPE_FCOE_RPI:
		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
		break;
	default:
		return -EIO;
	}

	/* Count the number of extents currently allocated for this type. */
	list_for_each_entry(rsrc_blk, blk_list_head, list) {
		if (curr_blks == 0) {
			/*
			 * The GET_ALLOCATED mailbox does not return the size,
			 * just the count.  The size should be just the size
			 * stored in the current allocated block and all sizes
			 * for an extent type are the same so set the return
			 * value now.
			 */
			*extnt_size = rsrc_blk->rsrc_size;
		}
		curr_blks++;
	}

	/*
	 * Calculate the size of an embedded mailbox.  The uint32_t
	 * accounts for extents-specific word.
	 */
	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
		sizeof(uint32_t);

	/*
	 * Presume the allocation and response will fit into an embedded
	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
	 */
	emb = LPFC_SLI4_MBX_EMBED;
	req_len = curr_blks * sizeof(uint16_t);
	if (req_len > emb_len) {
		req_len = curr_blks * sizeof(uint16_t) +
			sizeof(union lpfc_sli4_cfg_shdr) +
			sizeof(uint32_t);
		emb = LPFC_SLI4_MBX_NEMBED;
	}

	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
				     req_len, emb);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2983 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		rc = -ENOMEM;
		goto err_exit;
	}
	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}

	if (unlikely(rc)) {
		rc = -EIO;
		goto err_exit;
	}

	/*
	 * Figure out where the response is located.  Then get local pointers
	 * to the response data.  The port does not guarantee to respond to
	 * all extents counts request so update the local variable with the
	 * allocated count from the port.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED) {
		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
		shdr = &rsrc_ext->header.cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
	} else {
		virtaddr = mbox->sge_array->addr[0];
		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
		shdr = &n_rsrc->cfg_shdr;
		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
	}

	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"2984 Failed to read allocated resources "
			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
			type,
			bf_get(lpfc_mbox_hdr_status, &shdr->response),
			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
		rc = -EIO;
	}
 err_exit:
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return rc;
}
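
/*
 * A standalone sketch (not driver code) of the embedded/non-embedded sizing
 * decision made above: a payload that fits inside the fixed mailbox frame is
 * carried embedded, anything larger pays the external-SGE overhead.  The
 * sizes and names below are illustrative assumptions, not SLI4 constants.
 */
#if 0	/* example only, excluded from the build */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_MBOX_BYTES	256u	/* assumed fixed mailbox frame */
#define EXAMPLE_HDR_BYTES	32u	/* assumed header + extent word */

static bool example_use_embedded(uint32_t payload_bytes)
{
	/* Embedded capacity is what remains after the header overhead. */
	return payload_bytes <= (EXAMPLE_MBOX_BYTES - EXAMPLE_HDR_BYTES);
}
#endif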
/**
 * lpfc_sli4_repost_els_sgl_list - Repost the els buffers sgl pages as block
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine walks the list of els buffers that have been allocated and
 * reposts them to the port by using SGL block post. This is needed after a
 * pci_function_reset/warm_start or start. It attempts to construct blocks
 * of els buffer sgls which contain contiguous xris and uses the non-embedded
 * SGL block post mailbox commands to post them to the port. For single els
 * buffer sgl with non-contiguous xri, if any, it shall use embedded SGL post
 * mailbox command for posting.
 *
 * Returns: 0 = success, non-zero failure.
 **/
static int
lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL;
	struct lpfc_sglq *sglq_entry_next = NULL;
	struct lpfc_sglq *sglq_entry_first = NULL;
	int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
	int last_xritag = NO_XRI;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(prep_sgl_list);
	LIST_HEAD(blck_sgl_list);
	LIST_HEAD(allc_sgl_list);
	LIST_HEAD(post_sgl_list);
	LIST_HEAD(free_sgl_list);

	pring = &phba->sli.ring[LPFC_ELS_RING];
	spin_lock_irq(&phba->hbalock);
	spin_lock(&pring->ring_lock);
	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
	spin_unlock(&pring->ring_lock);
	spin_unlock_irq(&phba->hbalock);

	total_cnt = phba->sli4_hba.els_xri_cnt;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &allc_sgl_list, list) {
		list_del_init(&sglq_entry->list);
		block_cnt++;
		if ((last_xritag != NO_XRI) &&
		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
			/* a hole in xri block, form a sgl posting block */
			list_splice_init(&prep_sgl_list, &blck_sgl_list);
			post_cnt = block_cnt - 1;
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			block_cnt = 1;
		} else {
			/* prepare list for next posting block */
			list_add_tail(&sglq_entry->list, &prep_sgl_list);
			/* enough sgls for non-embed sgl mbox command */
			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
				block_cnt = 0;
			}
		}
		num_posted++;

		/* keep track of last sgl's xritag */
		last_xritag = sglq_entry->sli4_xritag;

		/* end of repost sgl list condition for els buffers */
		if (num_posted == phba->sli4_hba.els_xri_cnt) {
			if (post_cnt == 0) {
				list_splice_init(&prep_sgl_list,
						 &blck_sgl_list);
				post_cnt = block_cnt;
			} else if (block_cnt == 1) {
				status = lpfc_sli4_post_sgl(phba,
						sglq_entry->phys, 0,
						sglq_entry->sli4_xritag);
				if (!status) {
					/* successful, put sgl to posted list */
					list_add_tail(&sglq_entry->list,
						      &post_sgl_list);
				} else {
					/* Failure, put sgl to free list */
					lpfc_printf_log(phba, KERN_WARNING,
						LOG_SLI,
						"3159 Failed to post els "
						"sgl, xritag:x%x\n",
						sglq_entry->sli4_xritag);
					list_add_tail(&sglq_entry->list,
						      &free_sgl_list);
					total_cnt--;
				}
			}
		}

		/* continue until a nembed page worth of sgls */
		if (post_cnt == 0)
			continue;

		/* post the els buffer list sgls as a block */
		status = lpfc_sli4_post_els_sgl_list(phba, &blck_sgl_list,
						     post_cnt);

		if (!status) {
			/* success, put sgl list to posted sgl list */
			list_splice_init(&blck_sgl_list, &post_sgl_list);
		} else {
			/* Failure, put sgl list to free sgl list */
			sglq_entry_first = list_first_entry(&blck_sgl_list,
							    struct lpfc_sglq,
							    list);
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3160 Failed to post els sgl-list, "
					"xritag:x%x-x%x\n",
					sglq_entry_first->sli4_xritag,
					(sglq_entry_first->sli4_xritag +
					 post_cnt - 1));
			list_splice_init(&blck_sgl_list, &free_sgl_list);
			total_cnt -= post_cnt;
		}

		/* don't reset xritag due to hole in xri block */
		if (block_cnt == 0)
			last_xritag = NO_XRI;

		/* reset els sgl post count for next round of posting */
		post_cnt = 0;
	}
	/* update the number of XRIs posted for ELS */
	phba->sli4_hba.els_xri_cnt = total_cnt;

	/* free the els sgls failed to post */
	lpfc_free_sgl_list(phba, &free_sgl_list);

	/* push els sgls posted to the available list */
	if (!list_empty(&post_sgl_list)) {
		spin_lock_irq(&phba->hbalock);
		spin_lock(&pring->ring_lock);
		list_splice_init(&post_sgl_list,
				 &phba->sli4_hba.lpfc_sgl_list);
		spin_unlock(&pring->ring_lock);
		spin_unlock_irq(&phba->hbalock);
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3161 Failure to post els sgl to port.\n");
		return -EIO;
	}
	return 0;
}
/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;

	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readiness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}

	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
		phba->hba_flag |= HBA_FCOE_MODE;
	else
		phba->hba_flag &= ~HBA_FCOE_MODE;

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions, only read parameters if the
	 * board is FCoE
	 */
	if (phba->hba_flag & HBA_FCOE_MODE &&
	    lpfc_sli4_read_fcoe_params(phba))
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/*
	 * Retrieve sli4 device physical port name, failure of doing it
	 * is considered as non-fatal.
	 */
	rc = lpfc_sli4_retrieve_pport_name(phba);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"3080 Successful retrieving SLI4 device "
				"physical port name: %s.\n", phba->Port);

	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					&mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3362 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, rc);
		phba->pport->cfg_lun_queue_depth = rc;
	}

	/*
	 * Discover the port's supported feature set and match it against the
	 * host's requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}
	if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
		phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver.  This is not a fatal error.
	 */
	phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
	if (phba->cfg_enable_bg) {
		if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))
			phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
		else
			ftr_rsp++;
	}

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
	 * calls depends on these resources to complete port setup.
	 */
	rc = lpfc_sli4_alloc_resource_identifiers(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2920 Failed to alloc Resource IDs "
				"rc = x%x\n", rc);
		goto out_free_mbox;
	}

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->context1 = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	lpfc_update_vport_wwn(vport);

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* update host els and scsi xri-sgl sizes and mappings */
	rc = lpfc_sli4_xri_sgl_update(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"1400 Failed to update xri-sgl size and "
				"mapping: %d\n", rc);
		goto out_free_mbox;
	}

	/* register the els sgl pool to the port */
	rc = lpfc_sli4_repost_els_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during els sgl post "
				"operation\n", rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* register the allocated scsi sgl pool to the port */
	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0383 Error %d during scsi sgl post "
				"operation\n", rc);
		/* Some Scsi buffers were moved to the abort scsi list */
		/* A pci function reset will repost them */
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}
	lpfc_sli4_node_prep(phba);

	/* Create all the SLI4 queues */
	rc = lpfc_sli4_queue_create(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3089 Failed to allocate queues\n");
		rc = -ENODEV;
		goto out_stop_timers;
	}
	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n ", rc);
		goto out_destroy_queue;
	}

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL));

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
		rc = 0;
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		/*
		 * The FC Port needs to register FCFI (index 0)
		 */
		lpfc_reg_fcfi(phba, mboxq);
		mboxq->vport = phba->pport;
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS)
			goto out_unset_queue;
		rc = 0;
		phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
					&mboxq->u.mqe.un.reg_fcfi);

		/* Check if the port is configured to be disabled */
		lpfc_sli_read_link_ste(phba);
	}

	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);
	if (!(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->hba_flag & LINK_DISABLED)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
				"3103 Adapter Link is disabled.\n");
		lpfc_down_link(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
					"3104 Adapter failed to issue "
					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
			goto out_unset_queue;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		/* don't perform init_link on SLI4 FC port loopback test */
		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
			if (rc)
				goto out_unset_queue;
		}
	}
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	lpfc_sli4_queue_unset(phba);
out_destroy_queue:
	lpfc_sli4_queue_destroy(phba);
out_stop_timers:
	lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @ptr: context object - pointer to hba structure.
 *
 * This is the callback function for the mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox completes. The function is called by
 * the kernel timer code when a mailbox does not complete within
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
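
/*
 * A minimal standalone sketch (not driver code) of the pattern above: the
 * timer callback only records the event and wakes a worker; the heavy
 * recovery runs in process context.  The flag test keeps the event from
 * being posted twice.  Locking is elided and all names are illustrative.
 */
#if 0	/* example only, excluded from the build */
#include <stdio.h>

struct example_ctx {
	unsigned int events;
#define EXAMPLE_EVT_TMO	0x1
};

static void example_wake_worker(struct example_ctx *ctx)
{
	printf("worker woken, events=0x%x\n", ctx->events);
}

/* Timer callback: record the timeout and defer all processing. */
static void example_timeout_cb(struct example_ctx *ctx)
{
	if (!(ctx->events & EXAMPLE_EVT_TMO)) {
		ctx->events |= EXAMPLE_EVT_TMO;
		example_wake_worker(ctx);
	}
}
#endif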
/**
 * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
 *				      are pending
 * @phba: Pointer to HBA context object.
 *
 * This function checks if any mailbox completions are present on the mailbox
 * completion queue.
 **/
static bool
lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
{

	uint32_t idx;
	struct lpfc_queue *mcq;
	struct lpfc_mcqe *mcqe;
	bool pending_completions = false;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Check for completions on mailbox completion queue */

	mcq = phba->sli4_hba.mbx_cq;
	idx = mcq->hba_index;
	while (bf_get_le32(lpfc_cqe_valid, mcq->qe[idx].cqe)) {
		mcqe = (struct lpfc_mcqe *)mcq->qe[idx].cqe;
		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
			pending_completions = true;
			break;
		}
		idx = (idx + 1) % mcq->entry_count;
		if (mcq->hba_index == idx)
			break;
	}
	return pending_completions;
}
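
/*
 * A standalone sketch (not driver code) of the circular scan used above:
 * walk a ring starting at the head, wrap at the end, and stop after at
 * most one full lap.  The array arguments are illustrative stand-ins for
 * the hardware queue entries.
 */
#if 0	/* example only, excluded from the build */
#include <stdbool.h>

static bool example_ring_has_match(const int *valid, const int *wanted,
				   unsigned int head, unsigned int count)
{
	unsigned int idx = head;

	while (valid[idx]) {
		if (wanted[idx])
			return true;
		idx = (idx + 1) % count;	/* wrap at the end */
		if (idx == head)		/* full lap: stop */
			break;
	}
	return false;
}
#endif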
/**
 * lpfc_sli4_process_missed_mbox_completions - process mbox completions
 *					       that were missed.
 * @phba: Pointer to HBA context object.
 *
 * For sli4, it is possible to miss an interrupt. As such mbox completions
 * may be missed causing erroneous mailbox timeouts to occur. This function
 * checks to see if mbox completions are on the mailbox completion queue
 * and will process all the completions associated with the eq for the
 * mailbox completion queue.
 **/
static bool
lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
{

	uint32_t eqidx;
	struct lpfc_queue *fpeq = NULL;
	struct lpfc_eqe *eqe;
	bool mbox_pending;

	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
		return false;

	/* Find the eq associated with the mcq */

	if (phba->sli4_hba.hba_eq)
		for (eqidx = 0; eqidx < phba->cfg_fcp_io_channel; eqidx++)
			if (phba->sli4_hba.hba_eq[eqidx]->queue_id ==
			    phba->sli4_hba.mbx_cq->assoc_qid) {
				fpeq = phba->sli4_hba.hba_eq[eqidx];
				break;
			}
	if (!fpeq)
		return false;

	/* Turn off interrupts from this EQ */

	lpfc_sli4_eq_clr_intr(fpeq);

	/* Check to see if a mbox completion is pending */

	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);

	/*
	 * If a mbox completion is pending, process all the events on EQ
	 * associated with the mbox completion queue (this could include
	 * mailbox commands, async events, els commands, receive queue data
	 * and fcp commands)
	 */

	if (mbox_pending)
		while ((eqe = lpfc_sli4_eq_get(fpeq))) {
			lpfc_sli4_hba_handle_eqe(phba, eqe, eqidx);
			fpeq->EQ_processed++;
		}

	/* Always clear and re-arm the EQ */

	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	return mbox_pending;
}
/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from worker thread when a mailbox command times out.
 * The caller is not required to hold any locks. This function will reset the
 * HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = NULL;

	struct lpfc_sli *psli = &phba->sli;

	/* If the mailbox completed, process the completion and return */
	if (lpfc_sli4_process_missed_mbox_completions(phba))
		return;

	if (pmbox != NULL)
		mb = &pmbox->u.mb;
	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	lpfc_sli_abort_fcp_rings(phba);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and returns immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
 * The sli layer owns the mailbox object until the completion of mailbox
 * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mbx;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	psli = &phba->sli;

	mbx = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
		    !(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2528 Mailbox command x%x cannot "
					"issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mbx->mbxCommand, phba->pport->port_state,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
					(uint32_t)mbx->mbxCommand,
					mbx->un.varWords[0],
					mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX Bsy:        cmd:x%x mb:x%x x%x",
					(uint32_t)mbx->mbxCommand,
					mbx->un.varWords[0],
					mbx->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000);
		mod_timer(&psli->mbox_tmo, jiffies + timeout);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mbx->mbxCommand, phba->pport->port_state,
			psli->sli_flag, flag);

	if (mbx->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX Send vport: cmd:x%x mb:x%x x%x",
					(uint32_t)mbx->mbxCommand,
					mbx->un.varWords[0],
					mbx->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX Send:       cmd:x%x mb:x%x x%x",
					(uint32_t)mbx->mbxCommand,
					mbx->un.varWords[0],
					mbx->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mbx->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_sli_pcimem_bcopy(pmbox->context2,
					      (uint8_t *)phba->mbox_ext,
					      pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_memcpy_to_slim(phba->MBslimaddr +
					    MAILBOX_HBA_EXT_OFFSET,
					    pmbox->context2,
					    pmbox->in_ext_byte_len);
		}
		if (mbx->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
					      MAILBOX_CMD_SIZE);
		}

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof(uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof(uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mbx);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mbx->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
		}
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
					       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
					   1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) &slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
							~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->context2,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_memcpy_from_slim(pmbox->context2,
						      phba->MBslimaddr +
						      MAILBOX_HBA_EXT_OFFSET,
						      pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mbx->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out the
 * possible outstanding mailbox command before return.
 *
 * Returns:
 *	0 - the outstanding mailbox command completed; otherwise, the wait for
 *	the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	int rc = 0;
	unsigned long timeout = 0;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	if (phba->sli.mbox_active)
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
	spin_unlock_irq(&phba->hbalock);

	/* Make sure the mailbox is really active */
	if (timeout)
		lpfc_sli4_process_missed_mbox_completions(phba);

	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */
			rc = 1;
			break;
		}
	}

	/* Can not cleanly block async mailbox command, fail it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
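
/*
 * A standalone sketch (not driver code) of the deadline wait used above:
 * poll a condition until it clears or a wall-clock budget is exhausted.
 * Real kernel code sleeps between polls; this user-space stand-in only
 * illustrates the deadline arithmetic.
 */
#if 0	/* example only, excluded from the build */
#include <stdbool.h>
#include <time.h>

static bool example_wait_idle(volatile const int *busy, double max_seconds)
{
	const clock_t deadline =
		clock() + (clock_t)(max_seconds * CLOCKS_PER_SEC);

	while (*busy) {
		if (clock() > deadline)
			return false;	/* timed out, still busy */
	}
	return true;
}
#endif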
/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Outstanding synchronous mailbox command is guaranteed to be done,
	 * successful or timeout, after timing-out the outstanding mailbox
	 * command shall always be removed, so just unblock posting async
	 * mailbox command and resume
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function waits for the bootstrap mailbox register ready bit from
 * port for twice the regular mailbox command timeout value.
 *
 *	0 - no timeout on waiting for bootstrap mailbox register ready.
 *	MBXERR_ERROR - wait for bootstrap mailbox register timed out.
 **/
static int
lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	uint32_t db_ready;
	unsigned long timeout;
	struct lpfc_register bmbx_reg;

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
				   * 1000) + jiffies;

	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			msleep(2);

		if (time_after(jiffies, timeout))
			return MBXERR_ERROR;
	} while (!db_ready);

	return 0;
}
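
/*
 * A standalone sketch (not driver code) of the bit extraction done above by
 * bf_get(lpfc_bmbx_rdy, ...): pull one field out of a raw register word.
 * The shift and mask values here are illustrative assumptions, not the
 * SLI4 register layout.
 */
#if 0	/* example only, excluded from the build */
#include <stdbool.h>
#include <stdint.h>

#define EXAMPLE_RDY_SHIFT	0	/* assumed position of the ready bit */
#define EXAMPLE_RDY_MASK	0x1

static bool example_reg_ready(uint32_t word0)
{
	return (word0 >> EXAMPLE_RDY_SHIFT) & EXAMPLE_RDY_MASK;
}
#endif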
/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port.  The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 *	MBX_SUCCESS - mailbox posted successfully
 *	Any of the MBX error values.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;

	/*
	 * Only one mailbox can be active to the bootstrap mailbox region
	 * at a time and there is no queueing provided.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* wait for bootstrap mbox register for readiness */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post.  Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			      sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for hi-address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);

	/* wait for bootstrap mbox register for low address write done */
	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
	if (rc)
		goto exit;

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			      sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			      sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
	/*
	 * When the CQE status indicates a failure and the mailbox status
	 * indicates success then copy the CQE status into the mailbox status
	 * (and prefix it with x4000).
	 */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mb,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token, no lock needed for the release */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
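
/*
 * A standalone sketch (not driver code) of the two-phase address post done
 * above: a 64-bit DMA address goes through a 32-bit register in two writes,
 * with a ready-poll between phases.  The stubs stand in for register I/O
 * and the wait helper; they are assumptions, not driver interfaces.
 */
#if 0	/* example only, excluded from the build */
#include <stdint.h>

extern void example_write_reg(uint32_t val);	/* assumed MMIO stub */
extern int example_wait_ready(void);		/* 0 = ready, else timeout */

static int example_post_dma_address(uint64_t dma)
{
	example_write_reg((uint32_t)(dma >> 32));	/* high half first */
	if (example_wait_ready())
		return -1;
	example_write_reg((uint32_t)dma);		/* then low half */
	return example_wait_ready();
}
#endif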
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with SLI-4 interface spec.
 *
 * Return codes the caller owns the mailbox command after the return of the
 * function.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	/* dump from issue mailbox command if setup */
	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x/x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"(%d):2597 Sync Mailbox command "
					"x%x (x%x/x%x) failure: "
					"mqe_sta: x%x mcqe_sta: x%x/x%x "
					"Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli_config_mbox_subsys_get(phba,
									mboxq),
					lpfc_sli_config_mbox_opcode_get(phba,
									mboxq),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
					bf_get(lpfc_mcqe_ext_status,
					       &mboxq->mcqe),
					psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x/x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 *
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX Send vport: cmd:x%x mb:x%x x%x",
					mbx_cmnd, mqe->un.mb_words[0],
					mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX Send: cmd:x%x mb:x%x x%x",
					mbx_cmnd, mqe->un.mb_words[0],
					mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x/x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.mbox_active) {
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		/* Release the token */
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}

/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes: the caller owns the mailbox command after the return of the
 * function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
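
/*
 * Usage sketch (illustrative only, not compiled into the driver): how a
 * caller might issue a mailbox command through the rev-agnostic wrapper
 * above. The example assumes lpfc_heart_beat() from lpfc_mbox.c builds an
 * MBX_HEARTBEAT command; the helper name and error policy are hypothetical.
 */
#if 0
static int lpfc_example_issue_heartbeat(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);		/* build the mailbox command */
	pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	/* Dispatches to the _s3 or _s4 routine via the jump table. */
	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (rc != MBX_BUSY && rc != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
#endif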

/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}

/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the newly added command iocb.
 *
 * This function is called with hbalock held to add a command
 * iocb to the txq when the SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
}

/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. This function checks the
 * txq to flush the iocbs in the txq to the firmware before
 * submitting new iocbs to the firmware.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from the txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL. The caller needs to check
 * *piocb to find if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}

/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
 * this function allows only iocbs for posting buffers. This function finds
 * the next available slot in the command ring, posts the command to the
 * available slot and writes the port attention register to request the HBA
 * to start processing the new iocb. If there is no slot available in the
 * ring and flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the
 * txq, otherwise the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}

	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_RCTL_DD_UNSOL_CMD) ||
			    (piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/* fall through */
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
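
/*
 * Usage sketch (illustrative only): the SLI_IOCB_RET_IOCB flag decides who
 * owns a command that cannot be submitted. Without the flag the routine
 * queues the iocb on the txq and reports success; with the flag the iocb is
 * handed back with IOCB_BUSY and the caller must retry or release it.
 */
#if 0
	rc = __lpfc_sli_issue_iocb_s3(phba, LPFC_ELS_RING, piocb,
				      SLI_IOCB_RET_IOCB);
	if (rc == IOCB_BUSY)
		lpfc_sli_release_iocbq(phba, piocb);	/* caller still owns it */
#endif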

/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDEs is
 * converted to sli4_sge's. If the IOCB contains a single
 * BDE then it is converted to a single sli4_sge.
 * The IOCB is still in CPU endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		  struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl = NULL;
	struct lpfc_dmabuf *dmabuf;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->ulpCommand == CMD_XMIT_BLS_RSP64_CX)
		return sglq->sli4_xritag;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		if (piocbq->context3)
			dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
		else
			return xritag;

		bpl = (struct ulp_bde64 *)dmabuf->virt;
		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				bf_set(lpfc_sli4_sge_type, sgl,
					LPFC_SGE_TYPE_DATA);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		sgl->word2 = le32_to_cpu(sgl->word2);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
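
/*
 * Standalone sketch (illustrative, hypothetical names): the offset
 * accounting above for CMD_GEN_REQUEST64_CR keeps separate running offsets
 * for the outbound request list and the inbound reply list; the offset
 * restarts at zero when the first inbound (BUFF_TYPE_BDE_64I) entry is seen.
 * For request sizes 64+64 followed by reply sizes 256+256, the per-entry
 * offsets come out as 0, 64, 0, 256.
 */
#if 0
static void example_gen_req_offsets(const uint32_t *sizes,
				    const int *is_inbound, int n)
{
	uint32_t offset = 0;
	int i, inbound = 0;

	for (i = 0; i < n; i++) {
		if (is_inbound[i] && ++inbound == 1)
			offset = 0;	/* first reply entry restarts at 0 */
		/* sge[i].offset = offset; */
		offset += sizes[i];
	}
}
#endif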

/**
 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry.
 *
 * This routine converts the iocb command to its Work Queue Entry
 * equivalent. The wqe pointer should not have any fields set when
 * this routine is called because it will memcpy over them.
 * This routine does not set the CQ_ID or the WQEC bits in the
 * wqe.
 *
 * Returns: 0 = Success, IOCB_ERROR = Failure.
 **/
static int
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		union lpfc_wqe *wqe)
{
	uint32_t xmit_len = 0, total_len = 0;
	uint8_t ct = 0;
	uint32_t fip;
	uint32_t abort_tag;
	uint8_t command_type = ELS_COMMAND_NON_FIP;
	uint8_t cmnd;
	uint16_t xritag;
	uint16_t abrt_iotag;
	struct lpfc_iocbq *abrtiocbq;
	struct ulp_bde64 *bpl = NULL;
	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
	int numBdes, i;
	struct ulp_bde64 bde;
	struct lpfc_nodelist *ndlp;
	uint32_t *pcmd;
	uint32_t if_type;

	fip = phba->hba_flag & HBA_FIP_SUPPORT;
	/* The fcp commands will set command type */
	if (iocbq->iocb_flag & LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	/* Some of the fields are in the right position already */
	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
	abort_tag = (uint32_t) iocbq->iotag;
	xritag = iocbq->sli4_xritag;
	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
	wqe->generic.wqe_com.word10 = 0;
	/* words0-2 bpl convert bde */
	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		bpl = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)iocbq->context3)->virt;
		if (!bpl)
			return IOCB_ERROR;

		/* Should already be byte swapped. */
		wqe->generic.bde.addrHigh = le32_to_cpu(bpl->addrHigh);
		wqe->generic.bde.addrLow = le32_to_cpu(bpl->addrLow);
		/* swap the size field back to the cpu so we
		 * can assign it to the sgl.
		 */
		wqe->generic.bde.tus.w = le32_to_cpu(bpl->tus.w);
		xmit_len = wqe->generic.bde.tus.f.bdeSize;
		total_len = 0;
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			total_len += bde.tus.f.bdeSize;
		}
	} else
		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;

	iocbq->iocb.ulpIoTag = iocbq->iotag;
	cmnd = iocbq->iocb.ulpCommand;

	switch (iocbq->iocb.ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		if (iocbq->iocb_flag & LPFC_IO_LIBDFC)
			ndlp = iocbq->context_un.ndlp;
		else
			ndlp = (struct lpfc_nodelist *)iocbq->context1;
		if (!iocbq->iocb.ulpLe) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2007 Only Limited Edition cmd Format"
				" supported 0x%x\n",
				iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}

		wqe->els_req.payload_len = xmit_len;
		/* Els_request64 has a TMO */
		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
			iocbq->iocb.ulpTimeout);
		/* Need a VF for word 4; set the vf bit */
		bf_set(els_req64_vf, &wqe->els_req, 0);
		/* And a VFID for word 12 */
		bf_set(els_req64_vfid, &wqe->els_req, 0);
		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
		/* CCP CCPE PV PRI in word10 were set in the memcpy */
		if (command_type == ELS_COMMAND_FIP)
			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
					>> LPFC_FIP_ELS_ID_SHIFT);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if_type = bf_get(lpfc_sli_intf_if_type,
					&phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
				*pcmd == ELS_CMD_SCR ||
				*pcmd == ELS_CMD_FDISC ||
				*pcmd == ELS_CMD_LOGO ||
				*pcmd == ELS_CMD_PLOGI)) {
				bf_set(els_req64_sp, &wqe->els_req, 1);
				bf_set(els_req64_sid, &wqe->els_req,
					iocbq->vport->fc_myDID);
				if ((*pcmd == ELS_CMD_FLOGI) &&
					!(phba->fc_topology ==
						LPFC_TOPOLOGY_LOOP))
					bf_set(els_req64_sid, &wqe->els_req, 0);
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
					phba->vpi_ids[iocbq->vport->vpi]);
			} else if (pcmd && iocbq->context1) {
				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
					phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
			}
		}
		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		wqe->els_req.max_response_payload_len = total_len - xmit_len;
		break;
	case CMD_XMIT_SEQUENCE64_CX:
		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.un.ulpWord[3]);
		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		/* The entire sequence is transmitted for this IOCB */
		xmit_len = total_len;
		cmnd = CMD_XMIT_SEQUENCE64_CR;
		if (phba->link_flag & LS_LOOPBACK_MODE)
			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
		/* fall through */
	case CMD_XMIT_SEQUENCE64_CR:
		/* word3 iocb=io_tag32 wqe=reserved */
		wqe->xmit_sequence.rsvd3 = 0;
		/* word4 relative_offset memcpy */
		/* word5 r_ctl/df_ctl memcpy */
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		wqe->xmit_sequence.xmit_len = xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BCAST64_CN:
		/* word3 iocb=iotag32 wqe=seq_payload_len */
		wqe->xmit_bcast64.seq_payload_len = xmit_len;
		/* word4 iocb=rsvd wqe=rsvd */
		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
		break;
	case CMD_FCP_IWRITE64_CR:
		command_type = FCP_COMMAND_DATA_OUT;
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iwrite,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iwrite,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);
			if (phba->cfg_XLanePriority) {
				bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
				bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		break;
	case CMD_FCP_IREAD64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_iread,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_iread,
		       0);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);
			if (phba->cfg_XLanePriority) {
				bf_set(wqe_ccpe, &wqe->fcp_iread.wqe_com, 1);
				bf_set(wqe_ccp, &wqe->fcp_iread.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		break;
	case CMD_FCP_ICMND64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		bf_set(payload_offset_len, &wqe->fcp_icmd,
		       xmit_len + sizeof(struct fcp_rsp));
		bf_set(cmd_buff_len, &wqe->fcp_icmd,
		       0);
		/* word3 iocb=IO_TAG wqe=reserved */
		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
		bf_set(wqe_erp, &wqe->fcp_icmd.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		if (iocbq->iocb_flag & LPFC_IO_OAS) {
			bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);
			if (phba->cfg_XLanePriority) {
				bf_set(wqe_ccpe, &wqe->fcp_icmd.wqe_com, 1);
				bf_set(wqe_ccp, &wqe->fcp_icmd.wqe_com,
				       (phba->cfg_XLanePriority << 1));
			}
		}
		break;
	case CMD_GEN_REQUEST64_CR:
		/* For this command calculate the xmit length of the
		 * request bde.
		 */
		xmit_len = 0;
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
			sizeof(struct ulp_bde64);
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
				break;
			xmit_len += bde.tus.f.bdeSize;
		}
		/* word3 iocb=IO_TAG wqe=request_payload_len */
		wqe->gen_req.request_payload_len = xmit_len;
		/* word4 iocb=parameter wqe=relative_offset memcpy */
		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
		/* word6 context tag copied in memcpy */
		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2015 Invalid CT %x command 0x%x\n",
				ct, iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		wqe->gen_req.max_response_payload_len = total_len - xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_ELS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* words0-2 BDE memcpy */
		/* word3 iocb=iotag32 wqe=response_payload_len */
		wqe->xmit_els_rsp.response_payload_len = xmit_len;
		/* word4 */
		wqe->xmit_els_rsp.word4 = 0;
		/* word5 iocb=rsvd wqe=did */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
			 iocbq->iocb.un.xseq64.xmit_els_remoteID);

		if_type = bf_get(lpfc_sli_intf_if_type,
					&phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			if (iocbq->vport->fc_flag & FC_PT2PT) {
				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
					iocbq->vport->fc_myDID);
				if (iocbq->vport->fc_myDID == Fabric_DID) {
					bf_set(wqe_els_did,
						&wqe->xmit_els_rsp.wqe_dest, 0);
				}
			}
		}
		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       iocbq->iocb.unsli3.rcvsli3.ox_id);
		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
			       phba->vpi_ids[iocbq->vport->vpi]);
		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
		bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
					iocbq->context2)->virt);
		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
				iocbq->vport->fc_myDID);
			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
				phba->vpi_ids[phba->pport->vpi]);
		}
		command_type = OTHER_COMMAND;
		break;
	case CMD_CLOSE_XRI_CN:
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
		/* words 0-2 memcpy should be 0 reserved */
		/* port will send abts */
		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
		} else
			fip = 0;

		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
			/*
			 * The link is down, or the command was ELS_FIP
			 * so the fw does not need to send abts
			 * on the wire.
			 */
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
		else
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
		wqe->abort_cmd.rsrvd5 = 0;
		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
		/*
		 * The abort handler will send us CMD_ABORT_XRI_CN or
		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
		 */
		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		cmnd = CMD_ABORT_XRI_CX;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BLS_RSP64_CX:
		ndlp = (struct lpfc_nodelist *)iocbq->context1;
		/* As BLS ABTS RSP WQE is very different from other WQEs,
		 * we re-construct this WQE here based on information in
		 * iocbq from scratch.
		 */
		memset(wqe, 0, sizeof(union lpfc_wqe));
		/* OX_ID is invariable to who sent ABTS to CT exchange */
		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
		    LPFC_ABTS_UNSOL_INT) {
			/* ABTS sent by initiator to CT exchange, the
			 * RX_ID field will be filled with the newly
			 * allocated responder XRI.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       iocbq->sli4_xritag);
		} else {
			/* ABTS sent by responder to CT exchange, the
			 * RX_ID field will be filled with the responder
			 * RX_ID from ABTS.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
		}
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);

		/* Use CT=VPI */
		bf_set(wqe_els_did, &wqe->xmit_bls_rsp.wqe_dest,
			ndlp->nlp_DID);
		bf_set(xmit_bls_rsp64_temprpi, &wqe->xmit_bls_rsp,
			iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
			phba->vpi_ids[phba->pport->vpi]);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
		}

		break;
	case CMD_XRI_ABORTED_CX:
	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2014 Invalid command 0x%x\n",
				iocbq->iocb.ulpCommand);
		return IOCB_ERROR;
	}

	if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
	else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
		bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
	iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
			      LPFC_IO_DIF_INSERT);
	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	return 0;
}
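
/*
 * Background sketch (illustrative, standalone): the bf_set()/bf_get()
 * accessors used throughout lpfc_sli4_iocb2wqe() are shift-and-mask helpers
 * built from the _SHIFT/_MASK/_WORD constants declared with each field in
 * lpfc_hw4.h. A simplified equivalent for a field occupying the bits
 * [shift, shift + width) of a 32-bit word, with hypothetical names:
 */
#if 0
#define EX_FIELD_SHIFT	8
#define EX_FIELD_MASK	0x000000ff	/* mask applied after shifting down */

static inline void ex_bf_set(uint32_t *word, uint32_t val)
{
	*word = (*word & ~(EX_FIELD_MASK << EX_FIELD_SHIFT)) |
		((val & EX_FIELD_MASK) << EX_FIELD_SHIFT);
}

static inline uint32_t ex_bf_get(uint32_t word)
{
	return (word >> EX_FIELD_SHIFT) & EX_FIELD_MASK;
}
#endif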

/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding it to
 * the txq.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;
	struct lpfc_queue *wq;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
			sglq = NULL;
		else {
			if (!list_empty(&pring->txq)) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_sglq(phba, piocb);
				if (!sglq) {
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag & LPFC_IO_FCP) {
		/* These IO's already have an XRI and a mapped sgl. */
		sglq = NULL;
	} else {
		/*
		 * This is a continuation of a command (CX), so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_lxritag = sglq->sli4_lxritag;
		piocb->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
	    (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		if (!phba->cfg_fof || (!(piocb->iocb_flag & LPFC_IO_OAS))) {
			wq = phba->sli4_hba.fcp_wq[piocb->fcp_wqidx];
		} else {
			wq = phba->sli4_hba.oas_wq;
		}
		if (lpfc_sli4_wq_put(wq, &wqe))
			return IOCB_ERROR;
	} else {
		if (unlikely(!phba->sli4_hba.els_wq))
			return IOCB_ERROR;
		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			return IOCB_ERROR;
	}
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}

/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version for issuing an IOCB, using
 * the function pointer from the lpfc_hba struct.
 *
 * Return codes:
 * IOCB_ERROR - Error
 * IOCB_SUCCESS - Success
 * IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}

/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
	return 0;
}

/**
 * lpfc_sli_calc_ring - Calculates which ring to use
 * @phba: Pointer to HBA context object.
 * @ring_number: Initial ring
 * @piocb: Pointer to command iocb.
 *
 * For SLI4, FCP IO can be deferred to one of many WQs, based on
 * fcp_wqidx, thus we need to calculate the corresponding ring.
 * Since ABORTS must go on the same WQ as the command they are
 * aborting, we use the command's fcp_wqidx.
 */
static int
lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb)
{
	if (phba->sli_rev < LPFC_SLI_REV4)
		return ring_number;

	if (piocb->iocb_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
		if (!(phba->cfg_fof) ||
		    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
			if (unlikely(!phba->sli4_hba.fcp_wq))
				return LPFC_HBA_ERROR;
			/*
			 * for abort iocb fcp_wqidx should already
			 * be setup based on what work queue we used.
			 */
			if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX))
				piocb->fcp_wqidx =
					lpfc_sli4_scmd_to_wqidx_distr(phba,
							    piocb->context1);
			ring_number = MAX_SLI3_CONFIGURED_RINGS +
				piocb->fcp_wqidx;
		} else {
			if (unlikely(!phba->sli4_hba.oas_wq))
				return LPFC_HBA_ERROR;
			piocb->fcp_wqidx = 0;
			ring_number = LPFC_FCP_OAS_RING;
		}
	}
	return ring_number;
}
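
/*
 * Worked example (illustrative): assuming MAX_SLI3_CONFIGURED_RINGS is 4,
 * an FCP command distributed to work queue index 2 is issued on ring
 * 4 + 2 = 6, and an abort for it must reuse the same fcp_wqidx so it lands
 * on the same WQ as the original command.
 */
#if 0
	uint32_t ring = MAX_SLI3_CONFIGURED_RINGS + piocb->fcp_wqidx;
#endif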

/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
 * function. This function gets the appropriate lock, calls
 * __lpfc_sli_issue_iocb and returns the error returned by
 * __lpfc_sli_issue_iocb. This wrapper is used by
 * functions which do not hold hbalock.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_sli_ring *pring;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflags;
	int rc, idx;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
		if (unlikely(ring_number == LPFC_HBA_ERROR))
			return IOCB_ERROR;
		idx = piocb->fcp_wqidx;

		pring = &phba->sli.ring[ring_number];
		spin_lock_irqsave(&pring->ring_lock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);

		if (lpfc_fcp_look_ahead && (piocb->iocb_flag & LPFC_IO_FCP)) {
			fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];

			if (atomic_dec_and_test(&fcp_eq_hdl->
				fcp_eq_in_use)) {

				/* Get associated EQ with this index */
				fpeq = phba->sli4_hba.hba_eq[idx];

				/* Turn off interrupts from this EQ */
				lpfc_sli4_eq_clr_intr(fpeq);

				/*
				 * Process all the events on FCP EQ
				 */
				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
					lpfc_sli4_hba_handle_eqe(phba,
						eqe, idx);
					fpeq->EQ_processed++;
				}

				/* Always clear and re-arm the EQ */
				lpfc_sli4_eq_release(fpeq,
					LPFC_QUEUE_REARM);
			}
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
		}
	} else {
		/* For now, SLI2/3 will still use hbalock */
		spin_lock_irqsave(&phba->hbalock, iflags);
		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	return rc;
}
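
/*
 * Design note with sketch (illustrative): the lpfc_fcp_look_ahead path uses
 * fcp_eq_in_use as an atomic gate so that only the context that drops the
 * count to zero polls the event queue inline; every other context leaves the
 * EQ to the interrupt handler. The shape of the gate:
 */
#if 0
	if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use)) {
		/* sole user: poll and re-arm the EQ inline */
	}
	atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);	/* release the gate */
#endif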

/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode functionality
 * or IP over FC functionality.
 *
 * This function is called with no lock held.
 **/
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->ring[psli->fcp_ring];
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->ring[psli->extra_ring];

	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}

/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to iocb object.
 *
 * The async_event handler calls this routine when it receives
 * an ASYNC_STATUS_CN event from the port. The port generates
 * this event when an Abort Sequence request to an rport fails
 * twice in succession. The abort could be originated by the
 * driver or by the port. The ABTS could have been for an ELS
 * or FCP IO. The port only generates this event when an ABTS
 * fails to complete after one retry.
 */
static void
lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
			  struct lpfc_iocbq *iocbq)
{
	struct lpfc_nodelist *ndlp = NULL;
	uint16_t rpi = 0, vpi = 0;
	struct lpfc_vport *vport = NULL;

	/* The rpi in the ulpContext is vport-sensitive. */
	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
	rpi = iocbq->iocb.ulpContext;

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3092 Port generated ABTS async event "
			"on vpi %d rpi %d status 0x%x\n",
			vpi, rpi, iocbq->iocb.ulpStatus);

	vport = lpfc_find_vport_by_vpid(phba, vpi);
	if (!vport)
		goto err_exit;
	ndlp = lpfc_findnode_rpi(vport, rpi);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		goto err_exit;

	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
		lpfc_sli_abts_recover_port(vport, ndlp);
	return;

 err_exit:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3095 Event Context not found, no "
			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
			iocbq->iocb.ulpContext, iocbq->iocb.ulpStatus,
			vpi, rpi);
}

/* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
 * @phba: pointer to HBA context object.
 * @ndlp: nodelist pointer for the impacted rport.
 * @axri: pointer to the wcqe containing the failed exchange.
 *
 * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
 * port. The port generates this event when an abort exchange request to an
 * rport fails twice in succession with no reply. The abort could be originated
 * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
 */
void
lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
			   struct lpfc_nodelist *ndlp,
			   struct sli4_wcqe_xri_aborted *axri)
{
	struct lpfc_vport *vport;
	uint32_t ext_status = 0;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3115 Node Context not found, driver "
				"ignoring abts err event\n");
		return;
	}

	vport = ndlp->vport;
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"3116 Port generated FCP XRI ABORT event on "
			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
			bf_get(lpfc_wcqe_xa_xri, axri),
			bf_get(lpfc_wcqe_xa_status, axri),
			axri->parameter);

	/*
	 * Catch the ABTS protocol failure case. Older OCe FW releases returned
	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
	 */
	ext_status = axri->parameter & IOERR_PARAM_MASK;
	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
		lpfc_sli_abts_recover_port(vport, ndlp);
}

/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba *phba,
	struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;

	switch (evt_code) {
	case ASYNC_TEMP_WARN:
	case ASYNC_TEMP_SAFE:
		temp_event_data.data = (uint32_t) icmd->ulpContext;
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		if (evt_code == ASYNC_TEMP_WARN) {
			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		} else {
			temp_event_data.event_code = LPFC_NORMAL_TEMP;
			lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				(uint32_t) icmd->ulpContext);
		}

		/* Send temperature change event to applications */
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
			sizeof(temp_event_data), (char *) &temp_event_data,
			LPFC_NL_VENDOR_ID);
		break;
	case ASYNC_STATUS_CN:
		lpfc_sli_abts_err_handler(phba, iocbq);
		break;
	default:
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno, icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
		break;
	}
}

/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up the rings of the SLI interface with
 * the number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
	if (phba->sli_rev == LPFC_SLI_REV4)
		psli->num_rings += phba->cfg_fcp_io_channel;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->extra_ring = LPFC_EXTRA_RING;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->sli.sli3.numCiocb +=
				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->sli.sli3.numRiocb +=
				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			break;
		}
		totiocbsize += (pring->sli.sli3.numCiocb *
			pring->sli.sli3.sizeCiocb) +
			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}

/**
 * lpfc_sli_queue_setup - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
 * ring. This function also initializes the ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held and always returns
 * 1.
 **/
int
lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->sli.sli3.next_cmdidx = 0;
		pring->sli.sli3.local_getidx = 0;
		pring->sli.sli3.cmdidx = 0;
		pring->flag = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
		spin_lock_init(&pring->ring_lock);
	}
	spin_unlock_irq(&phba->hbalock);
	return 1;
}

/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: pending mailbox command queue; the outstanding mailbox
 * command; and completed mailbox command queue. It is caller's responsibility
 * to make sure that the driver is in the proper state to flush the mailbox
 * command sub-system. Namely, the posting of mailbox commands into the
 * pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state that it will never work on the outstanding
 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
 * mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);
	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}

/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		prev_pring_flag = pring->flag;
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}
		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			if (iocb->vport != vport)
				continue;
			list_move_tail(&iocb->list, &completions);
		}

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
									list) {
			if (iocb->vport != vport)
				continue;
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}

		pring->flag = prev_pring_flag;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}

/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocbs, buffers and mailbox commands
 * while shutting down the HBA. This function is called with no
 * lock held and always returns 1.
 * This function does the following to clean up driver resources:
 * - Free discovery resources for each virtual port
 * - Cleanup any pending fabric iocbs
 * - Iterate through the iocb txq and free each entry
 *   in the list.
 * - Free up any buffer posted to the HBA
 * - Free mailbox commands in the mailbox queue.
 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_splice_init(&pring->txq, &completions);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
			struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}

/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy, stepped one 32-bit word at a time.
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
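
/*
 * Standalone sketch (illustrative): the copy above moves whole 32-bit words
 * and converts from SLI (little-endian) order to CPU order. On a
 * little-endian host le32_to_cpu() is a no-op, so the word is copied
 * verbatim; on a big-endian host each word is byte-swapped. Note that @cnt
 * is a byte count stepped by sizeof(uint32_t), not a count of words.
 */
#if 0
	uint32_t sli_word = 0x11223344;			/* raw word from SLIM */
	uint32_t cpu_word = le32_to_cpu(sli_word);	/* unchanged on LE,
							 * 0x44332211 on BE */
#endif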

/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy, stepped one 32-bit word at a time.
 *
 * This function is used for copying data between a data structure
 * with big endian representation to local endianness.
 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}

/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffer posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no locks held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
	 * from a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}

/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found then lpfc_dmabuf object of the
 * buffer is returned to the caller else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%p x%p x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}

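/*
 * Illustrative pairing, assumed for this example only: a buffer posted
 * with CMD_QUE_XRI64_CX carries a QUE_BUFTAG_BIT tag, and the same tag
 * recovers it when the CMD_IOCB_RET_XRI64_CX response arrives.
 */
static struct lpfc_dmabuf * __maybe_unused
lpfc_example_tag_post_find(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_dmabuf *mp)
{
	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
	lpfc_sli_ringpostbuf_put(phba, pring, mp);
	/* ...later, from the response-ring handler... */
	return lpfc_sli_ring_taggedbuf_get(phba, pring, mp->buffer_tag);
}
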
/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found else it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}

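/*
 * Illustrative sketch of the unsolicited-event path (assumed helper): the
 * event handler gets only the DMA address from the IOCB and maps it back
 * to the posted lpfc_dmabuf before touching the payload.
 */
static void * __maybe_unused
lpfc_example_unsol_payload(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   dma_addr_t phys)
{
	struct lpfc_dmabuf *mp = lpfc_sli_ringpostbuf_get(phba, pring, phys);

	/* NULL means the address did not match any posted buffer */
	return mp ? mp->virt : NULL;
}
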
/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;
	uint16_t abort_iotag, abort_context;
	struct lpfc_iocbq *abort_iocb = NULL;

	if (irsp->ulpStatus) {

		/*
		 * Assume that the port already completed and returned, or
		 * will return the iocb. Just Log the message.
		 */
		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

		spin_lock_irq(&phba->hbalock);
		if (phba->sli_rev < LPFC_SLI_REV4) {
			if (abort_iotag != 0 &&
			    abort_iotag <= phba->sli.last_iotag)
				abort_iocb =
					phba->sli.iocbq_lookup[abort_iotag];
		} else
			/* For sli4 the abort_tag is the XRI,
			 * so the abort routine puts the iotag of the iocb
			 * being aborted in the context field of the abort
			 * IOCB.
			 */
			abort_iocb = phba->sli.iocbq_lookup[abort_context];

		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
				"0327 Cannot abort els iocb %p "
				"with tag %x context %x, abort status %x, "
				"abort code %x\n",
				abort_iocb, abort_iotag, abort_context,
				irsp->ulpStatus, irsp->un.ulpWord[4]);

		spin_unlock_irq(&phba->hbalock);
	}
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb)
{
	IOCB_t *irsp = &rspiocb->iocb;

	/* ELS cmd tag <ulpIoTag> completes */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"0139 Ignoring ELS cmd tag x%x completion Data: "
			"x%x x%x x%x\n",
			irsp->ulpIoTag, irsp->ulpStatus,
			irsp->un.ulpWord[4], irsp->ulpTimeout);
	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
		lpfc_ct_free_iocb(phba, cmdiocb);
	else
		lpfc_els_free_iocb(phba, cmdiocb);
	return;
}

/**
 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Except when the outstanding command iocb is itself an abort
 * request, this function issues the abort unconditionally. This function is
 * called with hbalock held. The function returns 0 when it fails due to
 * memory allocation failure or when the command iocb is an abort request.
 **/
static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;
	int ring_number;
	int retval;
	unsigned long iflags;

	/*
	 * There are certain command types we don't want to abort.  And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = __lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	/* This signals the response to set the correct status
	 * before calling the completion handler
	 */
	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

	iabt = &abtsiocbp->iocb;
	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.acxri.abortContextTag = icmd->ulpContext;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
		iabt->un.acxri.abortContextTag = cmdiocb->iotag;
	} else
		iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
	iabt->ulpLe = 1;
	iabt->ulpClass = icmd->ulpClass;

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
		abtsiocbp->iocb_flag |= LPFC_IO_FOF;

	if (phba->link_state >= LPFC_LINK_UP)
		iabt->ulpCommand = CMD_ABORT_XRI_CN;
	else
		iabt->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
	abtsiocbp->vport = vport;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
			 "0339 Abort xri x%x, original iotag x%x, "
			 "abort cmd iotag x%x\n",
			 iabt->un.acxri.abortIoTag,
			 iabt->un.acxri.abortContextTag,
			 abtsiocbp->iotag);

	if (phba->sli_rev == LPFC_SLI_REV4) {
		ring_number =
			lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
		if (unlikely(ring_number == LPFC_HBA_ERROR))
			return 0;
		pring = &phba->sli.ring[ring_number];
		/* Note: both hbalock and ring_lock need to be set here */
		spin_lock_irqsave(&pring->ring_lock, iflags);
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
					       abtsiocbp, 0);
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	} else {
		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
					       abtsiocbp, 0);
	}

	if (retval)
		__lpfc_sli_release_iocbq(phba, abtsiocbp);

	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly.  This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}

/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function shall be changed for those commands
 * so that nothing happens when they finish. This function is called with
 * hbalock held. The function returns 0 when the command iocb is an abort
 * request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	int retval = IOCB_ERROR;
	IOCB_t *icmd = NULL;

	/*
	 * There are certain command types we don't want to abort.  And we
	 * don't want to abort commands that are already in the process of
	 * being aborted.
	 */
	icmd = &cmdiocb->iocb;
	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
		return 0;

	/*
	 * If we're unloading, don't abort iocb on the ELS ring, but change
	 * the callback so that nothing happens when it finishes.
	 */
	if ((vport->load_flag & FC_UNLOADING) &&
	    (pring->ringno == LPFC_ELS_RING)) {
		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
		else
			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
		goto abort_iotag_exit;
	}

	/* Now, we try to issue the abort to the cmdiocb out */
	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
	/*
	 * Caller to this routine should check for IOCB_ERROR
	 * and handle it properly.  This routine no longer removes
	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
	 */
	return retval;
}

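/*
 * Illustrative caller sketch (assumed helper): lpfc_sli_issue_abort_iotag
 * must run under hbalock, so a timeout handler, for example, wraps it
 * like this.
 */
static void __maybe_unused
lpfc_example_abort_timed_out_els(struct lpfc_hba *phba,
				 struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	spin_lock_irq(&phba->hbalock);
	lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
	spin_unlock_irq(&phba->hbalock);
}
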
/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	int i;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
}

/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria is met for the given iocb and will return
 * 1 if the filtering criteria is not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by vport, tgt_id and
 * lun_id parameter.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
			   uint16_t tgt_id, uint64_t lun_id,
			   lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_scsi_buf *lpfc_cmd;
	int rc = 1;

	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
		return rc;

	if (iocbq->vport != vport)
		return rc;

	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);

	if (lpfc_cmd->pCmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if ((lpfc_cmd->rdata->pnode) &&
		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __func__, ctx_cmd);
		break;
	}

	return rc;
}

/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
 * commands pending on the vport associated with SCSI device specified
 * by tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
 * commands pending on the vport associated with SCSI target specified
 * by tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
		  lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	int sum, i;

	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
						ctx_cmd) == 0)
			sum++;
	}

	return sum;
}

/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"3096 ABORT_XRI_CN completing on rpi x%x "
			"original iotag x%x, abort cmd iotag x%x "
			"status 0x%x, reason 0x%x\n",
			cmdiocb->iocb.un.acxri.abortContextTag,
			cmdiocb->iocb.un.acxri.abortIoTag,
			cmdiocb->iotag, rspiocb->iocb.ulpStatus,
			rspiocb->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}

/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       abort_cmd) != 0)
			continue;

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		/* indicate the IO is being aborted by the driver. */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
		else
			abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;
		abtsiocb->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocb->iocb_flag |= LPFC_IO_FOF;

		if (lpfc_is_link_up(phba))
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
					      abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}

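/*
 * Illustrative recovery sketch (assumed helper and polling bounds): a LUN
 * reset path aborts everything pending on the LUN, then polls the
 * filter-based counter until the port has returned the exchanges.
 */
static void __maybe_unused
lpfc_example_flush_lun_ios(struct lpfc_vport *vport, uint16_t tgt_id,
			   uint64_t lun_id)
{
	struct lpfc_hba *phba = vport->phba;
	int wait_cnt = 0;

	lpfc_sli_abort_iocb(vport, &phba->sli.ring[LPFC_FCP_RING],
			    tgt_id, lun_id, LPFC_CTX_LUN);

	/* Each abort completes through lpfc_sli_abort_fcp_cmpl */
	while (lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN) &&
	       wait_cnt++ < 100)
		msleep(20);
}
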
/**
 * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @taskmgmt_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters.
 * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it aborted.
 * This function is called with no locks held right after a taskmgmt
 * command is sent.
 **/
int
lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *abtsiocbq;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *iocbq;
	IOCB_t *icmd;
	int sum, i, ret_val;
	unsigned long iflags;
	struct lpfc_sli_ring *pring_s4;
	uint32_t ring_number;

	spin_lock_irq(&phba->hbalock);

	/* all I/Os are in process of being flushed */
	if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}
	sum = 0;

	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
					       cmd) != 0)
			continue;

		/*
		 * If the iocbq is already being aborted, don't take a second
		 * action, but do count it.
		 */
		if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocbq = __lpfc_sli_get_iocbq(phba);
		if (abtsiocbq == NULL)
			continue;

		icmd = &iocbq->iocb;
		abtsiocbq->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocbq->iocb.un.acxri.abortContextTag = icmd->ulpContext;
		if (phba->sli_rev == LPFC_SLI_REV4)
			abtsiocbq->iocb.un.acxri.abortIoTag =
							 iocbq->sli4_xritag;
		else
			abtsiocbq->iocb.un.acxri.abortIoTag = icmd->ulpIoTag;
		abtsiocbq->iocb.ulpLe = 1;
		abtsiocbq->iocb.ulpClass = icmd->ulpClass;
		abtsiocbq->vport = vport;

		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
		abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
		if (iocbq->iocb_flag & LPFC_IO_FCP)
			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
		if (iocbq->iocb_flag & LPFC_IO_FOF)
			abtsiocbq->iocb_flag |= LPFC_IO_FOF;

		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
		ndlp = lpfc_cmd->rdata->pnode;

		if (lpfc_is_link_up(phba) &&
		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocbq->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;

		/*
		 * Indicate the IO is being aborted by the driver and set
		 * the caller's flag into the aborted IO.
		 */
		iocbq->iocb_flag |= LPFC_DRIVER_ABORTED;

		if (phba->sli_rev == LPFC_SLI_REV4) {
			ring_number = MAX_SLI3_CONFIGURED_RINGS +
					 iocbq->fcp_wqidx;
			pring_s4 = &phba->sli.ring[ring_number];
			/* Note: both hbalock and ring_lock must be set here */
			spin_lock_irqsave(&pring_s4->ring_lock, iflags);
			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
							abtsiocbq, 0);
			spin_unlock_irqrestore(&pring_s4->ring_lock, iflags);
		} else {
			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
							abtsiocbq, 0);
		}

		if (ret_val == IOCB_ERROR)
			__lpfc_sli_release_iocbq(phba, abtsiocbq);
		else
			sum++;
	}
	spin_unlock_irq(&phba->hbalock);
	return sum;
}

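/*
 * Illustrative sketch (assumed helper): after a LUN-reset task management
 * command succeeds, any I/O the target did not clean up is aborted in one
 * pass; the return value is how many aborts were actually issued.
 */
static int __maybe_unused
lpfc_example_post_tmf_cleanup(struct lpfc_vport *vport, uint16_t tgt_id,
			      uint64_t lun_id)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_sli_abort_taskmgmt(vport, &phba->sli.ring[LPFC_FCP_RING],
				       tgt_id, lun_id, LPFC_CTX_LUN);
}
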
/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;
	struct lpfc_scsi_buf *lpfc_cmd;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {

		/*
		 * A time out has occurred for the iocb.  If a time out
		 * completion handler has been supplied, call it.  Otherwise,
		 * just free the iocbq.
		 */

		spin_unlock_irqrestore(&phba->hbalock, iflags);
		cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
		cmdiocbq->wait_iocb_cmpl = NULL;
		if (cmdiocbq->iocb_cmpl)
			(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
		else
			lpfc_sli_release_iocbq(phba, cmdiocbq);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Set the exchange busy flag for task management commands */
	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
	    !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
					cur_iocbq);
		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
	}

	pdone_q = cmdiocbq->context_un.wait_queue;
	if (pdone_q)
		wake_up(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return;
}

/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
		  struct lpfc_iocbq *piocbq, uint32_t flag)
{
	unsigned long iflags;
	int ret;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = piocbq->iocb_flag & flag;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}

/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue the iocb on.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. The iocb_cmpl field of the iocb shall be used
 * to handle iocbs which time out. If the field is NULL, the
 * function shall free the iocbq structure. If more clean up is
 * needed, the caller is expected to provide a completion function
 * that will provide the needed clean up. If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if iocb_cmpl == NULL) or execute the
 * completion function set in the iocb_cmpl field and then return
 * a status of IOCB_TIMEDOUT. The caller should not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupt disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
			 uint32_t ring_number,
			 struct lpfc_iocbq *piocb,
			 struct lpfc_iocbq *prspiocbq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	long timeleft, timeout_req = 0;
	int retval = IOCB_SUCCESS;
	uint32_t creg_val;
	struct lpfc_iocbq *iocb;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	unsigned long iflags;
	bool iocb_completed = true;

	/*
	 * If the caller has provided a response iocbq buffer, then context2
	 * is NULL or its an error.
	 */
	if (prspiocbq) {
		if (piocb->context2)
			return IOCB_ERROR;
		piocb->context2 = prspiocbq;
	}

	piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
	piocb->context_un.wait_queue = &done_q;
	piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
				     SLI_IOCB_RET_IOCB);
	if (retval == IOCB_SUCCESS) {
		timeout_req = msecs_to_jiffies(timeout * 1000);
		timeleft = wait_event_timeout(done_q,
				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
				timeout_req);
		spin_lock_irqsave(&phba->hbalock, iflags);
		if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {

			/*
			 * IOCB timed out.  Inform the wake iocb wait
			 * completion function and set local status
			 */

			iocb_completed = false;
			piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		if (iocb_completed) {
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating if the IOCB has a success
			 * status or not - that's for the caller to check.
			 * IOCB_SUCCESS means just that the command was sent and
			 * completed. Not that it completed successfully.
			 */
		} else if (timeleft == 0) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0338 IOCB wait timeout error - no "
					"wake response Data x%x\n", timeout);
			retval = IOCB_TIMEDOUT;
		} else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0330 IOCB wake NOT set, "
					"Data x%x x%lx\n",
					timeout, (timeleft / jiffies));
			retval = IOCB_TIMEDOUT;
		}
	} else if (retval == IOCB_BUSY) {
		if (phba->cfg_log_verbose & LOG_SLI) {
			list_for_each_entry(iocb, &pring->txq, list) {
				txq_cnt++;
			}
			list_for_each_entry(iocb, &pring->txcmplq, list) {
				txcmplq_cnt++;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
		}
		return retval;
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"0332 IOCB wait issue failed, Data x%x\n",
				retval);
		retval = IOCB_ERROR;
	}

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val))
			return IOCB_ERROR;
		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (prspiocbq)
		piocb->context2 = NULL;

	piocb->context_un.wait_queue = NULL;
	piocb->iocb_cmpl = NULL;
	return retval;
}

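/*
 * Illustrative synchronous-issue sketch (assumed helper and 30-second
 * timeout): the caller supplies a response iocbq and must not free the
 * command resources when IOCB_TIMEDOUT is returned, since the late
 * completion is handled by lpfc_sli_wake_iocb_wait.
 */
static int __maybe_unused
lpfc_example_issue_els_sync(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_iocbq *rspiocb;
	int rc;

	rspiocb = lpfc_sli_get_iocbq(phba);
	if (!rspiocb)
		return IOCB_ERROR;

	/* Sleeps; must not be called from atomic or interrupt context */
	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocb,
				      rspiocb, 30);
	if (rc != IOCB_TIMEDOUT)
		lpfc_sli_release_iocbq(phba, rspiocb);
	return rc;
}
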
/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. Caller
 * should not free the mailbox resources, if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupt disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
	MAILBOX_t *mb = NULL;
	int retval;
	unsigned long flag;

	/* The caller might set context1 for extended buffer */
	if (pmboxq->context1)
		mb = (MAILBOX_t *)pmboxq->context1;

	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		wait_event_interruptible_timeout(done_q,
				pmboxq->mbox_flag & LPFC_MBX_WAKE,
				msecs_to_jiffies(timeout * 1000));

		spin_lock_irqsave(&phba->hbalock, flag);
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
		/*
		 * if LPFC_MBX_WAKE flag is set the mailbox is completed
		 * else do not free the resources.
		 */
		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
			retval = MBX_SUCCESS;
		} else {
			retval = MBX_TIMEOUT;
			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
		spin_unlock_irqrestore(&phba->hbalock, flag);
	} else {
		/* restore the possible extended buffer for free resource */
		pmboxq->context1 = (uint8_t *)mb;
	}

	return retval;
}

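/*
 * Illustrative synchronous mailbox sketch (assumed helper; the heartbeat
 * command is just a convenient simple mailbox for the example): on
 * MBX_TIMEOUT the mailbox memory is still owned by the completion path
 * and must not be freed here.
 */
static int __maybe_unused
lpfc_example_mbox_sync(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmboxq;
	int rc;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;

	lpfc_heart_beat(phba, pmboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
	if (rc != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}
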
/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as blocked to prevent asynchronous
 * mailbox commands from being issued off the pending mailbox command queue.
 * If the mailbox command sub-system shutdown is due to HBA error conditions
 * such as EEH or ERATT, this routine shall invoke the mailbox sub-system
 * flush routine to forcefully bring down the mailbox sub-system. Otherwise,
 * if it is due to normal condition (such as with offline or HBA function
 * reset), this routine will wait for the outstanding mailbox command to
 * complete before invoking the mailbox sub-system flush routine to
 * gracefully bring down the mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long timeout;

	if (mbx_action == LPFC_MBX_NO_WAIT) {
		/* delay 100ms for port state */
		msleep(100);
		lpfc_sli_mbox_sys_flush(phba);
		return;
	}
	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Determine how long we might wait for the active mailbox
		 * command to be gracefully completed by firmware.
		 */
		if (phba->sli.mbox_active)
			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
						phba->sli.mbox_active) *
						1000) + jiffies;
		spin_unlock_irq(&phba->hbalock);

		while (phba->sli.mbox_active) {
			/* Check active mailbox complete status every 2ms */
			msleep(2);
			if (time_after(jiffies, timeout))
				/* Timeout, let the mailbox flush routine
				 * forcefully release active mailbox command
				 */
				break;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_sli_mbox_sys_flush(phba);
}

/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* Read chip Host Attention (HA) register */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		goto unplug_err;

	if (ha_copy & HA_ERATT) {
		/* Read host status register to retrieve error event */
		if (lpfc_sli_read_hs(phba))
			goto unplug_err;

		/* Check if a deferred error condition is active */
		if ((HS_FFER1 & phba->work_hs) &&
		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
			phba->hba_flag |= DEFER_ERATT;
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr);
		}

		/* Set the driver HA work bitmap */
		phba->work_ha |= HA_ERATT;
		/* Indicate polling handles this ERATT */
		phba->hba_flag |= HBA_ERATT_HANDLED;
		return 1;
	}
	return 0;

unplug_err:
	/* Set the driver HS work bitmap */
	phba->work_hs |= UNPLUG_ERR;
	/* Set the driver HA work bitmap */
	phba->work_ha |= HA_ERATT;
	/* Indicate polling handles this ERATT */
	phba->hba_flag |= HBA_ERATT_HANDLED;
	return 1;
}

/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
	uint32_t uerr_sta_hi, uerr_sta_lo;
	uint32_t if_type, portsmphr;
	struct lpfc_register portstat_reg;

	/*
	 * For now, use the SLI4 device internal unrecoverable error
	 * registers for error attention. This can be changed later.
	 */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
			&uerr_sta_lo) ||
			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
			&uerr_sta_hi)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1423 HBA Unrecoverable error: "
					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
					"ue_mask_lo_reg=0x%x, "
					"ue_mask_hi_reg=0x%x\n",
					uerr_sta_lo, uerr_sta_hi,
					phba->sli4_hba.ue_mask_lo,
					phba->sli4_hba.ue_mask_hi);
			phba->work_status[0] = uerr_sta_lo;
			phba->work_status[1] = uerr_sta_hi;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
			&portstat_reg.word0) ||
			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			&portsmphr)) {
			phba->work_hs |= UNPLUG_ERR;
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
			phba->work_status[0] =
				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] =
				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2885 Port Status Event: "
					"port status reg 0x%x, "
					"port smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					portstat_reg.word0,
					portsmphr,
					phba->work_status[0],
					phba->work_status[1]);
			phba->work_ha |= HA_ERATT;
			phba->hba_flag |= HBA_ERATT_HANDLED;
			return 1;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2886 HBA Error Attention on unsupported "
				"if type %d.", if_type);
		return 1;
	}

	return 0;
}

/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
int
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
	uint32_t ha_copy;

	/* If somebody is waiting to handle an eratt, don't process it
	 * here. The brdkill function will do this.
	 */
	if (phba->link_flag & LS_IGNORE_ERATT)
		return 0;

	/* Check if interrupt handler handles this ERATT */
	spin_lock_irq(&phba->hbalock);
	if (phba->hba_flag & HBA_ERATT_HANDLED) {
		/* Interrupt handler has handled ERATT */
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/*
	 * If there is deferred error attention, do not check for error
	 * attention
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	/* If PCI channel is offline, don't process it */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irq(&phba->hbalock);
		return 0;
	}

	switch (phba->sli_rev) {
	case LPFC_SLI_REV2:
	case LPFC_SLI_REV3:
		/* Read chip Host Attention (HA) register */
		ha_copy = lpfc_sli_eratt_read(phba);
		break;
	case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
		ha_copy = lpfc_sli4_eratt_read(phba);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0299 Invalid SLI revision (%d)\n",
				phba->sli_rev);
		ha_copy = 0;
		break;
	}
	spin_unlock_irq(&phba->hbalock);

	return ha_copy;
}

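/*
 * Illustrative timer-context sketch (assumed helper): the error-attention
 * poll simply defers real handling to the worker thread.
 */
static void __maybe_unused
lpfc_example_eratt_poll(struct lpfc_hba *phba)
{
	if (lpfc_sli_check_eratt(phba))
		/* Error attention is latched; let the worker process it */
		lpfc_worker_wake_up(phba);
}
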
/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state that
 * interrupt should be handled, otherwise -EIO.
 **/
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
	/* If the pci channel is offline, ignore all the interrupts */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -EIO;

	/* Update device level interrupt statistics */
	phba->sli.slistat.sli_intr++;

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return -EIO;

	return 0;
}

/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy, hc_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	unsigned long iflag;
	uint32_t control;

	MAILBOX_t *mbox, *pmbox;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	int rc;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for slow-path events */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			goto unplug_error;
		/* If somebody is waiting to handle an eratt don't process it
		 * here. The brdkill function will do this.
		 */
		if (phba->link_flag & LS_IGNORE_ERATT)
			ha_copy &= ~HA_ERATT;
		/* Check the need for handling ERATT in interrupt handler */
		if (ha_copy & HA_ERATT) {
			if (phba->hba_flag & HBA_ERATT_HANDLED)
				/* ERATT polling has handled ERATT */
				ha_copy &= ~HA_ERATT;
			else
				/* Indicate interrupt handler handles ERATT */
				phba->hba_flag |= HBA_ERATT_HANDLED;
		}

		/*
		 * If there is deferred error attention, do not check for any
		 * interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}

		/* Clear up only attention source related to slow-path */
		if (lpfc_readl(phba->HCregaddr, &hc_copy))
			goto unplug_error;

		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
			HC_LAINT_ENA | HC_ERINT_ENA),
			phba->HCregaddr);
		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
			phba->HAregaddr);
		writel(hc_copy, phba->HCregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (work_ha_copy) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock_irqsave(&phba->hbalock, iflag);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			/*
			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
			 * the only slow ring.
			 */
			status = (work_ha_copy &
				(HA_RXMASK << (4*LPFC_ELS_RING)));
			status >>= (4*LPFC_ELS_RING);
			if (status & HA_RXMASK) {
				spin_lock_irqsave(&phba->hbalock, iflag);
				if (lpfc_readl(phba->HCregaddr, &control))
					goto unplug_error;

				lpfc_debugfs_slow_ring_trc(phba,
				"ISR slow ring: ctl:x%x stat:x%x isrcnt:x%x",
				control, status,
				(uint32_t)phba->sli.slistat.sli_intr);

				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR Disable ring:"
						"pwork:x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));

					control &=
					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
				}
				else {
					lpfc_debugfs_slow_ring_trc(phba,
						"ISR slow ring: pwork:"
						"x%x hawork:x%x wait:x%x",
						phba->work_ha, work_ha_copy,
						(uint32_t)((unsigned long)
						&phba->work_waitq));
				}
				spin_unlock_irqrestore(&phba->hbalock, iflag);
			}
		}
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (work_ha_copy & HA_ERATT) {
			if (lpfc_sli_read_hs(phba))
				goto unplug_error;
			/*
			 * Check if there is a deferred error condition
			 * is active
			 */
			if ((HS_FFER1 & phba->work_hs) &&
				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
				  phba->work_hs)) {
				phba->hba_flag |= DEFER_ERATT;
				/* Clear all interrupt enable conditions */
				writel(0, phba->HCregaddr);
				readl(phba->HCregaddr);
			}
		}

		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
			pmb = phba->sli.mbox_active;
			pmbox = &pmb->u.mb;
			mbox = phba->mbox;
			vport = pmb->vport;

			/* First check out the status word */
			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
			if (pmbox->mbxOwner != OWN_HOST) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				/*
				 * Stray Mailbox Interrupt, mbxCommand <cmd>
				 * mbxStatus <status>
				 */
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI,
						"(%d):0304 Stray Mailbox "
						"Interrupt mbxCommand x%x "
						"mbxStatus x%x\n",
						(vport ? vport->vpi : 0),
						pmbox->mbxCommand,
						pmbox->mbxStatus);
				/* clear mailbox attention bit */
				work_ha_copy &= ~HA_MBATT;
			} else {
				phba->sli.mbox_active = NULL;
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->last_completion_time = jiffies;
				del_timer(&phba->sli.mbox_tmo);
				if (pmb->mbox_cmpl) {
					lpfc_sli_pcimem_bcopy(mbox, pmbox,
							MAILBOX_CMD_SIZE);
					if (pmb->out_ext_byte_len &&
						pmb->context2)
						lpfc_sli_pcimem_bcopy(
						phba->mbox_ext,
						pmb->context2,
						pmb->out_ext_byte_len);
				}
				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

					lpfc_debugfs_disc_trc(vport,
						LPFC_DISC_TRC_MBOX_VPORT,
						"MBOX dflt rpi: : "
						"status:x%x rpi:x%x",
						(uint32_t)pmbox->mbxStatus,
						pmbox->un.varWords[0], 0);

					if (!pmbox->mbxStatus) {
						mp = (struct lpfc_dmabuf *)
							(pmb->context1);
						ndlp = (struct lpfc_nodelist *)
							pmb->context2;

						/* Reg_LOGIN of dflt RPI was
						 * successful. new lets get
						 * rid of the RPI using the
						 * same mbox buffer.
						 */
						lpfc_unreg_login(phba,
							vport->vpi,
							pmbox->un.varWords[0],
							pmb);
						pmb->mbox_cmpl =
							lpfc_mbx_cmpl_dflt_rpi;
						pmb->context1 = mp;
						pmb->context2 = ndlp;
						pmb->vport = vport;
						rc = lpfc_sli_issue_mbox(phba,
								pmb,
								MBX_NOWAIT);
						if (rc != MBX_BUSY)
							lpfc_printf_log(phba,
							KERN_ERR,
							LOG_MBOX | LOG_SLI,
							"0350 rc should have"
							"been MBX_BUSY\n");
						if (rc != MBX_NOT_FINISHED)
							goto send_current_mbox;
					}
				}
				spin_lock_irqsave(
						&phba->pport->work_port_lock,
						iflag);
				phba->pport->work_port_events &=
					~WORKER_MBOX_TMO;
				spin_unlock_irqrestore(
						&phba->pport->work_port_lock,
						iflag);
				lpfc_mbox_cmpl_put(phba, pmb);
			}
		} else
			spin_unlock_irqrestore(&phba->hbalock, iflag);

		if ((work_ha_copy & HA_MBATT) &&
		    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
			/* Process next mailbox command if there is one */
			do {
				rc = lpfc_sli_issue_mbox(phba, NULL,
							 MBX_NOWAIT);
			} while (rc == MBX_NOT_FINISHED);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
					LOG_SLI, "0349 rc should be "
					"MBX_SUCCESS\n");
		}

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->work_ha |= work_ha_copy;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_worker_wake_up(phba);
	}
	return IRQ_HANDLED;
unplug_error:
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */

/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	unsigned long status;
	unsigned long iflag;

	/* Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Stuff needs to be attended to when this function is invoked as an
	 * individual interrupt handler in MSI-X multi-message interrupt mode
	 */
	if (phba->intr_type == MSIX) {
		/* Check device state for handling interrupt */
		if (lpfc_intr_state_check(phba))
			return IRQ_NONE;
		/* Need to read HA REG for FCP ring and other ring events */
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return IRQ_HANDLED;
		/* Clear up only attention source related to fast-path */
		spin_lock_irqsave(&phba->hbalock, iflag);
		/*
		 * If there is deferred error attention, do not check for
		 * any interrupt.
		 */
		if (unlikely(phba->hba_flag & DEFER_ERATT)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			return IRQ_NONE;
		}
		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
			phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	} else
		ha_copy = phba->ha_copy;

	/*
	 * Process all events on FCP ring. Take the optimized path for FCP IO.
	 */
	ha_copy &= ~(phba->work_ha_mask);

	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXMASK)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	if (phba->cfg_multi_ring_support == 2) {
		/*
		 * Process all events on extra ring. Take the optimized path
		 * for extra ring IO.
		 */
		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status >>= (4*LPFC_EXTRA_RING);
		if (status & HA_RXMASK) {
			lpfc_sli_handle_fast_ring_event(phba,
					&phba->sli.ring[LPFC_EXTRA_RING],
					status);
		}
	}
	return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */


/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	unsigned long status1, status2;
	uint32_t hc_copy;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (lpfc_intr_state_check(phba))
		return IRQ_NONE;

	spin_lock(&phba->hbalock);
	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}

	if (unlikely(!phba->ha_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	} else if (phba->ha_copy & HA_ERATT) {
		if (phba->hba_flag & HBA_ERATT_HANDLED)
			/* ERATT polling has handled ERATT */
			phba->ha_copy &= ~HA_ERATT;
		else
			/* Indicate interrupt handler handles ERATT */
			phba->hba_flag |= HBA_ERATT_HANDLED;
	}

	/*
	 * If there is deferred error attention, do not check for any interrupt.
	 */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock(&phba->hbalock);
		return IRQ_NONE;
	}

	/* Clear attention sources except link and error attentions */
	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
		spin_unlock(&phba->hbalock);
		return IRQ_HANDLED;
	}
	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
		phba->HCregaddr);
	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(&phba->hbalock);

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */

	/* status of events with mailbox and link attention */
	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

	/* status of events with ELS ring */
	status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
	status2 >>= (4*LPFC_ELS_RING);

	if (status1 || (status2 & HA_RXMASK))
		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
	else
		sp_irq_rc = IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */

	/* status of events with FCP ring */
	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status1 >>= (4*LPFC_FCP_RING);

	/* status of events with extra ring */
	if (phba->cfg_multi_ring_support == 2) {
		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
		status2 >>= (4*LPFC_EXTRA_RING);
	} else
		status2 = 0;

	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
	else
		fp_irq_rc = IRQ_NONE;

	/* Return device-level interrupt handling status */
	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
}  /* lpfc_sli_intr_handler */
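
/*
 * Illustrative sketch (not driver code): each SLI-3 ring owns a 4-bit
 * nibble of ring-attention bits in the HA register, so the handlers above
 * isolate ring N's bits with:
 *
 *	status = (ha_copy & (HA_RXMASK << (4 * N)));
 *	status >>= (4 * N);
 *	if (status & HA_RXMASK)
 *		... dispatch to ring N ...
 *
 * For a hypothetical ha_copy value with only the FCP ring (ring 0) nibble
 * set, status1 above is non-zero and only the fast-path handler runs.
 */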

/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 FCP abort XRI events.
 **/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the fcp xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the fcp xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for FCP work queue */
		lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the els xri abort event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the els xri abort events */
	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Notify aborted XRI for ELS work queue */
		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
		/* Free the event processed back to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}

/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
			      struct lpfc_iocbq *pIocbIn,
			      struct lpfc_iocbq *pIocbOut,
			      struct lpfc_wcqe_complete *wcqe)
{
	int numBdes, i;
	unsigned long iflags;
	uint32_t status, max_response;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl, bde;
	size_t offset = offsetof(struct lpfc_iocbq, iocb);

	memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
	       sizeof(struct lpfc_iocbq) - offset);
	/* Map WCQE parameters into irspiocb parameters */
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	pIocbIn->iocb.ulpStatus = (status & LPFC_IOCB_STATUS_MASK);
	if (pIocbOut->iocb_flag & LPFC_IO_FCP)
		if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
			pIocbIn->iocb.un.fcpi.fcpi_parm =
					pIocbOut->iocb.un.fcpi.fcpi_parm -
					wcqe->total_data_placed;
		else
			pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
	else {
		pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
		switch (pIocbOut->iocb.ulpCommand) {
		case CMD_ELS_REQUEST64_CR:
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl  = (struct ulp_bde64 *)dmabuf->virt;
			bde.tus.w = le32_to_cpu(bpl[1].tus.w);
			max_response = bde.tus.f.bdeSize;
			break;
		case CMD_GEN_REQUEST64_CR:
			max_response = 0;
			if (!pIocbOut->context3)
				break;
			numBdes = pIocbOut->iocb.un.genreq64.bdl.bdeSize/
					sizeof(struct ulp_bde64);
			dmabuf = (struct lpfc_dmabuf *)pIocbOut->context3;
			bpl = (struct ulp_bde64 *)dmabuf->virt;
			for (i = 0; i < numBdes; i++) {
				bde.tus.w = le32_to_cpu(bpl[i].tus.w);
				if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
					max_response += bde.tus.f.bdeSize;
			}
			break;
		default:
			max_response = wcqe->total_data_placed;
			break;
		}
		if (max_response < wcqe->total_data_placed)
			pIocbIn->iocb.un.genreq64.bdl.bdeSize = max_response;
		else
			pIocbIn->iocb.un.genreq64.bdl.bdeSize =
				wcqe->total_data_placed;
	}

	/* Convert BG errors for completion status */
	if (status == CQE_STATUS_DI_ERROR) {
		pIocbIn->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;

		if (bf_get(lpfc_wcqe_c_bg_edir, wcqe))
			pIocbIn->iocb.un.ulpWord[4] = IOERR_RX_DMA_FAILED;
		else
			pIocbIn->iocb.un.ulpWord[4] = IOERR_TX_DMA_FAILED;

		pIocbIn->iocb.unsli3.sli3_bg.bgstat = 0;
		if (bf_get(lpfc_wcqe_c_bg_ge, wcqe)) /* Guard Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_GUARD_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_ae, wcqe)) /* App Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_APPTAG_ERR_MASK;
		if (bf_get(lpfc_wcqe_c_bg_re, wcqe)) /* Ref Tag Check failed */
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_REFTAG_ERR_MASK;

		/* Check to see if there was any good data before the error */
		if (bf_get(lpfc_wcqe_c_bg_tdpv, wcqe)) {
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				BGS_HI_WATER_MARK_PRESENT_MASK;
			pIocbIn->iocb.unsli3.sli3_bg.bghm =
				wcqe->total_data_placed;
		}

		/*
		 * Set ALL the error bits to indicate we don't know what
		 * type of error it is.
		 */
		if (!pIocbIn->iocb.unsli3.sli3_bg.bgstat)
			pIocbIn->iocb.unsli3.sli3_bg.bgstat |=
				(BGS_REFTAG_ERR_MASK | BGS_APPTAG_ERR_MASK |
				BGS_GUARD_ERR_MASK);
	}

	/* Pick up HBA exchange busy condition */
	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
}
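
/*
 * Worked example (hypothetical values): for a CMD_GEN_REQUEST64_CR whose
 * BPL carries two response-side BDEs of 1024 and 512 bytes, the loop above
 * computes max_response = 1536. If the WCQE then reports
 * total_data_placed = 2048, the response bdeSize is clamped to 1536;
 * had the WCQE reported 1200, bdeSize would be 1200 instead.
 */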

/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
			       struct lpfc_iocbq *irspiocbq)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_wcqe_complete *wcqe;
	unsigned long iflags;

	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	/* Look up the ELS command IOCB and create pseudo response IOCB */
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0386 ELS complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		lpfc_sli_release_iocbq(phba, irspiocbq);
		return NULL;
	}

	/* Fake the irspiocbq and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

	return irspiocbq;
}

/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0392 Async Event: word0:x%x, word1:x%x, "
			"word2:x%x, word3:x%x\n", mcqe->word0,
			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0394 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into an asynchronous event entry */
	memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
	/* Set the async event flag */
	phba->hba_flag |= ASYNC_EVENT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
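
/*
 * Illustrative sketch (assumed downstream handling): once an async MCQE
 * has been queued by the routine above, the worker thread later
 * classifies it by its trailer code, roughly:
 *
 *	switch (bf_get(lpfc_trailer_code, &mcqe)) {
 *	case LPFC_TRAILER_CODE_LINK:
 *		... link attention event ...
 *	case LPFC_TRAILER_CODE_FCOE:
 *		... FCF/FCoE event ...
 *	}
 */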

/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a mailbox
 * completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
	uint32_t mcqe_status;
	MAILBOX_t *mbox, *pmbox;
	struct lpfc_mqe *mqe;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *mp;
	unsigned long iflags;
	LPFC_MBOXQ_t *pmb;
	bool workposted = false;
	int rc;

	/* If not a mailbox complete MCQE, out by checking mailbox consume */
	if (!bf_get(lpfc_trailer_completed, mcqe))
		goto out_no_mqe_complete;

	/* Get the reference to the active mbox command */
	spin_lock_irqsave(&phba->hbalock, iflags);
	pmb = phba->sli.mbox_active;
	if (unlikely(!pmb)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"1832 No pending MBOX command to handle\n");
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		goto out_no_mqe_complete;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	mqe = &pmb->u.mqe;
	pmbox = (MAILBOX_t *)&pmb->u.mqe;
	mbox = phba->mbox;
	vport = pmb->vport;

	/* Reset heartbeat timer */
	phba->last_completion_time = jiffies;
	del_timer(&phba->sli.mbox_tmo);

	/* Move mbox data to caller's mailbox region, do endian swapping */
	if (pmb->mbox_cmpl && mbox)
		lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));

	/*
	 * For mcqe errors, conditionally move a modified error code to
	 * the mbox so that the error will not be missed.
	 */
	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
			bf_set(lpfc_mqe_status, mqe,
			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
	}
	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
				      "MBOX dflt rpi: status:x%x rpi:x%x",
				      mcqe_status,
				      pmbox->un.varWords[0], 0);
		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
			mp = (struct lpfc_dmabuf *)(pmb->context1);
			ndlp = (struct lpfc_nodelist *)pmb->context2;
			/* Reg_LOGIN of dflt RPI was successful. Now lets get
			 * RID of the PPI using the same mbox buffer.
			 */
			lpfc_unreg_login(phba, vport->vpi,
					 pmbox->un.varWords[0], pmb);
			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
			pmb->context1 = mp;
			pmb->context2 = ndlp;
			pmb->vport = vport;
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc != MBX_BUSY)
				lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
						LOG_SLI, "0385 rc should "
						"have been MBX_BUSY\n");
			if (rc != MBX_NOT_FINISHED)
				goto send_current_mbox;
		}
	}
	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

	/* There is mailbox completion work to do */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_mbox_cmpl_put(phba, pmb);
	phba->work_ha |= HA_MBATT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	workposted = true;

send_current_mbox:
	spin_lock_irqsave(&phba->hbalock, iflags);
	/* Release the mailbox command posting token */
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting active mailbox pointer need to be in sync to flag clear */
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	/* Wake up worker thread to post the next pending mailbox command */
	lpfc_worker_wake_up(phba);
out_no_mqe_complete:
	if (bf_get(lpfc_trailer_consumed, mcqe))
		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = cq->pring;
	int txq_cnt = 0;
	int txcmplq_cnt = 0;
	int fcp_txcmplq_cnt = 0;

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		if (!list_empty(&pring->txq))
			txq_cnt++;
		if (!list_empty(&pring->txcmplq))
			txcmplq_cnt++;
		if (!list_empty(&phba->sli.ring[LPFC_FCP_RING].txcmplq))
			fcp_txcmplq_cnt++;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
			txq_cnt, phba->iocb_cnt,
			fcp_txcmplq_cnt,
			txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}

/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking the
 * proper WQ release routine to the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* sanity check on queue memory */
	if (unlikely(!phba->sli4_hba.els_wq))
		return;
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}

/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into the proper xri abort event list */
	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
	switch (cq->subtype) {
	case LPFC_FCP:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
		/* Set the fcp xri abort event flag */
		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_ELS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid work queue CQE subtype (x%x)\n",
				cq->subtype);
		workposted = false;
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct hbq_dmabuf *dma_buf;
	uint32_t status, rq_id;
	unsigned long iflags;

	/* sanity check on queue memory */
	if (unlikely(!hrq) || unlikely(!drq))
		return workposted;

	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
	else
		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
	if (rq_id != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		hrq->RQ_buf_trunc++;
		/* fall through: the truncated frame is still consumed */
	case FC_STATUS_RQ_SUCCESS:
		lpfc_sli4_rq_release(hrq, drq);
		spin_lock_irqsave(&phba->hbalock, iflags);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			hrq->RQ_no_buf_found++;
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		hrq->RQ_rcv_buf++;
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
		/* save off the frame for the work thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		hrq->RQ_no_posted_buf++;
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}

/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive queue completion
 * queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
	case CQE_CODE_RECEIVE_V1:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}
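
/*
 * Design note with a minimal sketch (assumptions flagged inline): CQEs
 * live in DMA memory that the port writes in little-endian order, so the
 * handlers above first snapshot the entry through lpfc_sli_pcimem_bcopy()
 * and only then pick it apart with bf_get():
 *
 *	struct lpfc_cqe cqevt;
 *
 *	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
 *	code = bf_get(lpfc_cqe_code, &cqevt);	// safe on any host endianness
 *
 * Reading fields in place from the live queue entry would race with the
 * hardware recycling the slot once the CQ is released.
 */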

/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @speq: Pointer to slow-path event queue.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue. It will check the MajorCode and MinorCode to determine this is for
 * a completion event on a completion queue; if not, an error shall be logged
 * and just return. Otherwise, it will get to the corresponding completion
 * queue and process all the entries on that completion queue, rearm the
 * completion queue, and then return.
 *
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
	struct lpfc_queue *speq)
{
	struct lpfc_queue *cq = NULL, *childq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ecount = 0;
	uint16_t cqid;

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Process all the entries to the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
			if (!(++ecount % cq->entry_repost))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}
		break;
	case LPFC_WCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			if (cq->subtype == LPFC_FCP)
				workposted |= lpfc_sli4_fp_handle_wcqe(phba,
								       cq, cqe);
			else
				workposted |= lpfc_sli4_sp_handle_cqe(phba,
								      cq, cqe);
			if (!(++ecount % cq->entry_repost))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}

		/* Track the max number of CQEs processed in 1 EQ */
		if (ecount > cq->CQ_max_cqe)
			cq->CQ_max_cqe = ecount;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Catch the no cq entry condition, log an error */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0371 No entry from the CQ: identifier "
				"(x%x), type (%d)\n", cq->queue_id, cq->type);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to associated CQ
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = cq->pring;
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
		      IOSTAT_LOCAL_REJECT)) &&
		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
		     IOERR_NO_RESOURCES))
			phba->lpfc_rampdown_queue_depth(phba);

		/* Log the error status */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0373 FCP complete error: status=x%x, "
				"hw_status=x%x, total_data_specified=%d, "
				"parameter=x%x, word3=x%x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed, wcqe->parameter,
				wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&pring->ring_lock, iflags);
	pring->stats.iocb_event++;
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&pring->ring_lock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
	if (unlikely(!cmdiocbq->iocb_cmpl)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}

/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine to the slow-path WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t fcp_wqid;

	/* Check for fast-path FCP work queue release */
	fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == fcp_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
}

/**
 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from a
 * fast-path event queue for FCP command response completion.
 **/
static int
lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
		cq->CQ_wq++;
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		cq->CQ_release_wqe++;
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		cq->CQ_xri_aborted++;
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}

/**
 * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @qidx: Index of the fast-path EQ that received the entry.
 *
 * This routine processes an event queue entry from the fast-path event
 * queue. It will check the MajorCode and MinorCode to determine this is for
 * a completion event on a completion queue; if not, an error shall be logged
 * and just return. Otherwise, it will get to the corresponding completion
 * queue and process all the entries on the completion queue, rearm the
 * completion queue, and then return.
 **/
static void
lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			uint32_t qidx)
{
	struct lpfc_queue *cq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	uint16_t cqid;
	int ecount = 0;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Check if this is a Slow path event */
	if (unlikely(cqid != phba->sli4_hba.fcp_cq_map[qidx])) {
		lpfc_sli4_sp_handle_eqe(phba, eqe,
			phba->sli4_hba.hba_eq[qidx]);
		return;
	}

	if (unlikely(!phba->sli4_hba.fcp_cq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3146 Fast-path completion queues "
				"does not exist\n");
		return;
	}
	cq = phba->sli4_hba.fcp_cq[qidx];
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0367 Fast-path completion queue "
					"(%d) does not exist\n", qidx);
		return;
	}

	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Process all the entries to the CQ */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (ecount > cq->CQ_max_cqe)
		cq->CQ_max_cqe = ecount;

	/* Catch the no cq entry condition */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0369 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;

	/* walk all the EQ entries and drop on the floor */
	while ((eqe = lpfc_sli4_eq_get(eq)))
		;

	/* Clear and re-arm the EQ */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
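
/*
 * Illustrative call site (a sketch, following the error-path convention
 * of the SLI-4 interrupt handlers below): when the port is in a bad state
 * the ISR discards stale events instead of dispatching them:
 *
 *	if (unlikely(lpfc_intr_state_check(phba))) {
 *		spin_lock_irqsave(&phba->hbalock, iflag);
 *		if (phba->link_state < LPFC_LINK_DOWN)
 *			lpfc_sli4_eq_flush(phba, eq);
 *		spin_unlock_irqrestore(&phba->hbalock, iflag);
 *		return IRQ_NONE;
 *	}
 */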

/**
 * lpfc_sli4_fof_handle_eqe - Process a Flash Optimized Fabric event queue
 *			      entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 *
 * This routine processes an event queue entry from the Flash Optimized
 * Fabric event queue. It will check the MajorCode and MinorCode to determine
 * this is for a completion event on a completion queue; if not, an error
 * shall be logged and just return. Otherwise, it will get to the
 * corresponding completion queue and process all the entries on the
 * completion queue, rearm the completion queue, and then return.
 **/
static void
lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	uint16_t cqid;
	int ecount = 0;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"9147 Not a valid completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Next check for OAS */
	cq = phba->sli4_hba.oas_cq;
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"9148 OAS completion queue "
					"does not exist\n");
		return;
	}

	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"9149 Miss-matched fast-path compl "
				"queue id: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Process all the entries to the OAS CQ */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
		if (!(++ecount % cq->entry_repost))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}

	/* Track the max number of CQEs processed in 1 EQ */
	if (ecount > cq->CQ_max_cqe)
		cq->CQ_max_cqe = ecount;

	/* Catch the no cq entry condition */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"9153 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there are works to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_fof_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a Flash Optimized Fabric
 * IOCB ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The Flash Optimized Fabric ring events are handled
 * in the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that,
 * the EQ to CQ are one-to-one map such that the EQ index is
 * equal to that of CQ index.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_fof_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_queue *eq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;

	/* Get the driver's phba structure from the dev_id */
	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
	phba = fcp_eq_hdl->phba;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	eq = phba->sli4_hba.fof_eq;
	if (unlikely(!eq))
		return IRQ_NONE;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		eq->EQ_badstate++;
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, eq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on FCP fast-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(eq))) {
		lpfc_sli4_fof_handle_eqe(phba, eqe);
		if (!(++ecount % eq->entry_repost))
			lpfc_sli4_eq_release(eq, LPFC_QUEUE_NOARM);
		eq->EQ_processed++;
	}

	/* Track the max number of EQEs processed in 1 intr */
	if (ecount > eq->EQ_max_eqe)
		eq->EQ_max_eqe = ecount;

	if (unlikely(ecount == 0)) {
		eq->EQ_no_entry++;

		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"9145 MSI-X interrupt with no EQE\n");
		else {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"9146 ISR interrupt with no EQE\n");
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
		}
	}
	/* Always clear and re-arm the fast-path EQ */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
	return IRQ_HANDLED;
}

/**
 * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-4 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that,
 * the FCP EQ to FCP CQ are one-to-one map such that the FCP EQ index is
 * equal to that of FCP CQ index.
 *
 * The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;
	int fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
	phba = fcp_eq_hdl->phba;
	fcp_eqidx = fcp_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;
	if (unlikely(!phba->sli4_hba.hba_eq))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.hba_eq[fcp_eqidx];
	if (unlikely(!fpeq))
		return IRQ_NONE;

	if (lpfc_fcp_look_ahead) {
		if (atomic_dec_and_test(&fcp_eq_hdl->fcp_eq_in_use))
			lpfc_sli4_eq_clr_intr(fpeq);
		else {
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
			return IRQ_NONE;
		}
	}

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		fpeq->EQ_badstate++;
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		if (lpfc_fcp_look_ahead)
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on FCP fast-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
		lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
		if (!(++ecount % fpeq->entry_repost))
			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
		fpeq->EQ_processed++;
	}

	/* Track the max number of EQEs processed in 1 intr */
	if (ecount > fpeq->EQ_max_eqe)
		fpeq->EQ_max_eqe = ecount;

	/* Always clear and re-arm the fast-path EQ */
	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		fpeq->EQ_no_entry++;

		if (lpfc_fcp_look_ahead) {
			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
			return IRQ_NONE;
		}

		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	if (lpfc_fcp_look_ahead)
		atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
	return IRQ_HANDLED;
} /* lpfc_sli4_hba_intr_handler */

/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler to device with SLI-4
 * interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba  *phba;
	irqreturn_t hba_irq_rc;
	bool hba_handled = false;
	int fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
					&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	if (phba->cfg_fof) {
		hba_irq_rc = lpfc_sli4_fof_intr_handler(irq,
					&phba->sli4_hba.fcp_eq_hdl[0]);
		if (hba_irq_rc == IRQ_HANDLED)
			hba_handled |= true;
	}

	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
} /* lpfc_sli4_intr_handler */
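
/*
 * Illustrative sketch (not the driver's actual init path, which lives in
 * lpfc_init.c): in MSI/INTx mode the device-level handler above is what
 * gets registered with the kernel, with the phba as the shared dev_id:
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *	if (rc)
 *		... fall back to another interrupt mode or fail attach ...
 */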

/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	kfree(queue);
	return;
}

/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
		      uint32_t entry_count)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	int x, total_qe_count;
	void *dma_pointer;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	queue = kzalloc(sizeof(struct lpfc_queue) +
			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
	if (!queue)
		return NULL;
	queue->page_count = (ALIGN(entry_size * entry_count,
			hw_page_size))/hw_page_size;
	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
						   hw_page_size, &dmabuf->phys,
						   GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* initialize queue's entry array */
		dma_pointer = dmabuf->virt;
		for (; total_qe_count < entry_count &&
		     dma_pointer < (hw_page_size + dmabuf->virt);
		     total_qe_count++, dma_pointer += entry_size) {
			queue->qe[total_qe_count].address = dma_pointer;
		}
	}
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;

	/*
	 * entry_repost is calculated based on the number of entries in the
	 * queue. This works out except for RQs. If buffers are NOT initially
	 * posted for every RQE, entry_repost should be adjusted accordingly.
	 */
	queue->entry_repost = (entry_count >> 3);
	if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST)
		queue->entry_repost = LPFC_QUEUE_MIN_REPOST;
	queue->phba = phba;

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
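
/*
 * Usage sketch (hypothetical sizes; error handling trimmed): allocation
 * and creation are deliberately split, so a caller pairs this routine
 * with one of the *_create() functions below and with
 * lpfc_sli4_queue_free() on teardown:
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, LPFC_EQE_SIZE_4B, 1024);
 *	if (!eq)
 *		return -ENOMEM;
 *	if (lpfc_eq_create(phba, eq, phba->cfg_fcp_imax)) {
 *		lpfc_sli4_queue_free(eq);
 *		return -ENXIO;
 *	}
 */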

/**
 * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
 * @phba: HBA structure that indicates port to create a queue on.
 * @pci_barset: PCI BAR set flag.
 *
 * This function shall perform iomap of the specified PCI BAR address to host
 * memory address if not already done so and return it. The returned host
 * memory address can be NULL.
 */
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
	if (!phba->pcidev)
		return NULL;

	switch (pci_barset) {
	case WQ_PCI_BAR_0_AND_1:
		return phba->pci_bar0_memmap_p;
	case WQ_PCI_BAR_2_AND_3:
		return phba->pci_bar2_memmap_p;
	case WQ_PCI_BAR_4_AND_5:
		return phba->pci_bar4_memmap_p;
	default:
		break;
	}
	return NULL;
}
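
/*
 * Illustrative sketch (hypothetical offset variable): the __iomem cookie
 * returned above is later combined with a queue's doorbell offset taken
 * from the CREATE mailbox response, along the lines of:
 *
 *	void __iomem *bar;
 *
 *	bar = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
 *	if (!bar)
 *		return -ENOMEM;
 *	q->db_regaddr = bar + db_offset;	// db_offset: hypothetical
 */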

/**
 * lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
 * @phba: HBA structure that indicates port to create a queue on.
 * @startq: The starting FCP EQ to modify
 *
 * This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @startq
 * is used to get the starting FCP EQ to change.
 * This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint32_t startq)
{
	struct lpfc_mbx_modify_eq_delay *eq_delay;
	LPFC_MBOXQ_t *mbox;
	struct lpfc_queue *eq;
	int cnt, rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t result;
	int fcp_eqidx;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;

	if (startq >= phba->cfg_fcp_io_channel)
		return 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_delay = &mbox->u.mqe.un.eq_delay;

	/* Calculate delay multiplier from maximum interrupt per second */
	result = phba->cfg_fcp_imax / phba->cfg_fcp_io_channel;
	if (result > LPFC_DMULT_CONST)
		dmult = 0;
	else
		dmult = LPFC_DMULT_CONST/result - 1;

	cnt = 0;
	for (fcp_eqidx = startq; fcp_eqidx < phba->cfg_fcp_io_channel;
	     fcp_eqidx++) {
		eq = phba->sli4_hba.hba_eq[fcp_eqidx];
		if (!eq)
			continue;
		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
		eq_delay->u.request.eq[cnt].phase = 0;
		eq_delay->u.request.eq[cnt].delay_multi = dmult;
		cnt++;
		if (cnt >= LPFC_MAX_EQ_DELAY)
			break;
	}
	eq_delay->u.request.num_eq = cnt;

	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2512 MODIFY_EQ_DELAY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
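
/*
 * Worked example (hypothetical numbers; LPFC_DMULT_CONST is the
 * firmware's maximum-rate constant): with cfg_fcp_imax = 40000 total
 * interrupts/sec spread across cfg_fcp_io_channel = 4 EQs, each EQ is
 * budgeted result = 10000 ints/sec and the multiplier programmed above is
 * dmult = LPFC_DMULT_CONST / 10000 - 1; a result above LPFC_DMULT_CONST
 * degenerates to dmult = 0, i.e. no interrupt throttling.
 */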

/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
	/* don't setup delay multiplier using EQ_CREATE */
	dmult = 0;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->hba_index = 0;

	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
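
/*
 * Illustrative sketch (assumed convention): every queue-create mailbox in
 * this file reports success through two layers, checked the same way:
 *
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);   // transport status
 *	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *	if (shdr_status || shdr_add_status || rc)
 *		status = -ENXIO;	// any layer failing fails the create
 */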
/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to. This
 * function will send the CQ_CREATE mailbox command to the HBA to setup the
 * completion queue and will wait for the mailbox command to finish before
 * continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!cq || !eq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		/* FW only supports 1. Should be PAGE_SIZE/SLI4_PAGE_SIZE */
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request, 1);
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count. (%d)\n",
				cq->entry_count);
		if (cq->entry_count < 256) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	cq->assoc_qid = eq->queue_id;
	cq->host_index = 0;
	cq->hba_index = 0;

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
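/*
 * Illustrative sketch (hypothetical queue pointers): a CQ is always bound
 * to an existing EQ; LPFC_WCQ and LPFC_FCP are the type/subtype constants
 * this file uses for fast-path work completions.
 *
 *	rc = lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
 *	if (!rc) {
 *		// success: cq is now on eq->child_list and cq->queue_id
 *		// holds the port-assigned queue ID
 *	}
 */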
/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is identical
 * to mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}
/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @mq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 * mailbox queue and will wait for the mailbox command to finish before
 * continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	/* sanity check on queue memory */
	if (!mq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16) {
			status = -EINVAL;
			goto out;
		}
		/* otherwise default to smallest count (drop through) */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
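/*
 * Illustrative sketch (hypothetical pointers): the driver creates one MQ
 * against the mailbox CQ during SLI4 bring-up; the MQ_CREATE_EXT to
 * MQ_CREATE failback above happens inside this call, so the caller only
 * sees the final status.
 *
 *	rc = lpfc_mq_create(phba, mq, cq, LPFC_MBOX);
 */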
/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @wq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. This
 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 * work queue and will wait for the mailbox command to finish before
 * continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!wq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);

	/* wqv is the earliest version supported, NOT the latest */
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);

	switch (phba->sli4_hba.pc_sli4_params.wqv) {
	case LPFC_Q_CREATE_VERSION_0:
		switch (wq->entry_size) {
		default:
		case 64:
			/* Nothing to do, version 0 ONLY supports 64 byte */
			page = wq_create->u.request.page;
			break;
		case 128:
			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
			    LPFC_WQ_SZ128_SUPPORT)) {
				status = -ERANGE;
				goto out;
			}
			/* If we get here the HBA MUST also support V1 and
			 * we MUST use it
			 */
			bf_set(lpfc_mbox_hdr_version, &shdr->request,
			       LPFC_Q_CREATE_VERSION_1);

			bf_set(lpfc_mbx_wq_create_wqe_count,
			       &wq_create->u.request_1, wq->entry_count);
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			bf_set(lpfc_mbx_wq_create_page_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_PAGE_SIZE_4096);
			page = wq_create->u.request_1.page;
			break;
		}
		break;
	case LPFC_Q_CREATE_VERSION_1:
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		bf_set(lpfc_mbox_hdr_version, &shdr->request,
		       LPFC_Q_CREATE_VERSION_1);

		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			if (!(phba->sli4_hba.pc_sli4_params.wqsize &
			    LPFC_WQ_SZ128_SUPPORT)) {
				status = -ERANGE;
				goto out;
			}
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		bf_set(lpfc_mbx_wq_create_page_size,
		       &wq_create->u.request_1,
		       LPFC_WQ_PAGE_SIZE_4096);
		page = wq_create->u.request_1.page;
		break;
	default:
		status = -EINVAL;
		goto out;
	}
	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
				       &wq_create->u.response);
		if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (wq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3265 WQ[%d] doorbell format not "
					"supported: x%x\n", wq->queue_id,
					wq->db_format);
			status = -EINVAL;
			goto out;
		}
		pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
				    &wq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3263 WQ[%d] failed to memmap pci "
					"barset:x%x\n", wq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}
		db_offset = wq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3252 WQ[%d] doorbell offset not "
					"supported: x%x\n", wq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		wq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3264 WQ[%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", wq->queue_id, pci_barset,
				db_offset, wq->db_format);
	} else {
		wq->db_format = LPFC_DB_LIST_FORMAT;
		wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;
	wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
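/*
 * Illustrative sketch (hypothetical pointers): WQs bind to a CQ and carry
 * a functional subtype (LPFC_FCP and LPFC_ELS are the ones used in this
 * file). On success wq->db_regaddr is ready for lpfc_sli4_wq_put() to
 * ring the work queue doorbell.
 *
 *	rc = lpfc_wq_create(phba, wq, cq, LPFC_FCP);
 */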
/**
 * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ
 * @phba: HBA structure that indicates port to create a queue on.
 * @rq: The queue structure to use for the receive queue.
 * @qno: The associated HBQ number
 *
 * For SLI4 we need to adjust the RQ repost value based on
 * the number of buffers that are initially posted to the RQ.
 **/
void
lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
{
	uint32_t cnt;

	/* sanity check on queue memory */
	if (!rq)
		return;
	cnt = lpfc_hbq_defs[qno]->entry_count;

	/* Recalc repost for RQs based on buffers initially posted */
	cnt = (cnt >> 3);
	if (cnt < LPFC_QUEUE_MIN_REPOST)
		cnt = LPFC_QUEUE_MIN_REPOST;

	rq->entry_repost = cnt;
}
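/*
 * Worked example for the recalculation above (the macro value is assumed
 * to be 8 in this driver generation): an HBQ with entry_count 512 gives
 * 512 >> 3 = 64, so the RQ is replenished after every 64 consumed
 * buffers; entry_count 32 gives 32 >> 3 = 4, which is clamped up to
 * LPFC_QUEUE_MIN_REPOST.
 */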
/**
 * lpfc_rq_create - Create a Receive Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @hrq: The queue structure to use to create the header receive queue.
 * @drq: The queue structure to use to create the data receive queue.
 * @cq: The completion queue to bind this receive queue pair to.
 *
 * This function creates a receive buffer queue pair, as detailed in @hrq and
 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
 * to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
 * structs are used to get the entry count that is necessary to determine the
 * number of pages to use for this queue. The @cq is used to indicate which
 * completion queue to bind received buffers that are posted to these queues to.
 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
 * receive queue pair and will wait for the mailbox command to finish before
 * continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	void __iomem *bar_memmap_p;
	uint32_t db_offset;
	uint16_t pci_barset;

	/* sanity check on queue memory */
	if (!hrq || !drq || !cq)
		return -ENODEV;
	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size,
		       &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size,
		       &rq_create->u.request.context,
		       LPFC_RQ_PAGE_SIZE_4096);
	} else {
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (drop through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);

	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}

	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
					&rq_create->u.response);
		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3262 RQ [%d] doorbell format not "
					"supported: x%x\n", hrq->queue_id,
					hrq->db_format);
			status = -EINVAL;
			goto out;
		}

		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
				    &rq_create->u.response);
		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
		if (!bar_memmap_p) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3269 RQ[%d] failed to memmap pci "
					"barset:x%x\n", hrq->queue_id,
					pci_barset);
			status = -ENOMEM;
			goto out;
		}

		db_offset = rq_create->u.response.doorbell_offset;
		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3270 RQ[%d] doorbell offset not "
					"supported: x%x\n", hrq->queue_id,
					db_offset);
			status = -EINVAL;
			goto out;
		}
		hrq->db_regaddr = bar_memmap_p + db_offset;
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
				"format:x%x\n", hrq->queue_id, pci_barset,
				db_offset, hrq->db_format);
	} else {
		hrq->db_format = LPFC_DB_RING_FORMAT;
		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;

	/* now create the data queue */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context, hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
		       LPFC_RQE_SIZE_8);
		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (drop through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
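/*
 * Illustrative sketch (hypothetical pointers): header and data RQs are
 * created as a pair against one CQ, and must have identical entry counts
 * or the routine fails before issuing any mailbox; LPFC_USOL is the
 * unsolicited-receive subtype used elsewhere in this file.
 *
 *	rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
 */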
/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!eq)
		return -ENODEV;
	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!cq)
		return -ENODEV;
	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove cq from any list */
	list_del_init(&cq->list);
	mempool_free(mbox, cq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!wq)
		return -ENODEV;
	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove wq from any list */
	list_del_init(&wq->list);
	mempool_free(mbox, wq->phba->mbox_mem_pool);
	return status;
}
/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @rq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @rq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @rq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
int
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* sanity check on queue memory */
	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}
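/*
 * Illustrative teardown sketch (hypothetical pointers): queues are torn
 * down child-first so no CQ disappears while a WQ/MQ/RQ still points at
 * it, mirroring the child_list linkage built by the create routines above.
 *
 *	lpfc_wq_destroy(phba, wq);
 *	lpfc_rq_destroy(phba, hrq, drq);
 *	lpfc_mq_destroy(phba, mq);
 *	lpfc_cq_destroy(phba, cq);
 *	lpfc_eq_destroy(phba, eq);
 */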
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If you are going to map 2 SGLs then the first one must have 256 entries
 * and the second SGL can have between 1 and 256 entries.
 *
 * Return codes:
 *	0 - Success
 *	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		dma_addr_t pdma_phys_addr0,
		dma_addr_t pdma_phys_addr1,
		uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			sizeof(struct lpfc_mbx_post_sgl_pages) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
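/*
 * Illustrative sketch (hypothetical sglq pointer): posting the SGL pages
 * for one XRI; pdma_phys_addr1 is 0 when a single page holds all the
 * scatter gather entries.
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 */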
/**
 * lpfc_sli4_alloc_xri - Get an available xri in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available logical xri from
 * the driver's xri bitmask, consistent with the SLI-4 interface spec.
 *
 * Returns
 *	The allocated logical xri if successful.
 *	NO_XRI if no xris are available.
 **/
uint16_t
lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
{
	uint16_t xri;

	/*
	 * Fetch the next logical xri. Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irq(&phba->hbalock);
	xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
				 phba->sli4_hba.max_cfg_param.max_xri, 0);
	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
		spin_unlock_irq(&phba->hbalock);
		return NO_XRI;
	}
	set_bit(xri, phba->sli4_hba.xri_bmask);
	phba->sli4_hba.max_cfg_param.xri_used++;
	spin_unlock_irq(&phba->hbalock);
	return xri;
}
/**
 * __lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver. The caller is expected
 * to hold the hbalock.
 **/
static void
__lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask))
		phba->sli4_hba.max_cfg_param.xri_used--;
}
/**
 * lpfc_sli4_free_xri - Release an xri for reuse.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release an xri to the pool of
 * available xris maintained by the driver.
 **/
void
lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_xri(phba, xri);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will return NO_XRI (0xffff), which is not a valid xritag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xri_index;

	xri_index = lpfc_sli4_alloc_xri(phba);
	if (xri_index == NO_XRI)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2004 Failed to allocate XRI.last XRITAG is %d"
				" Max XRI is %d, Used XRI is %d\n",
				xri_index,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.xri_used);
	return xri_index;
}
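/*
 * Illustrative sketch: the routines above form a simple bitmask allocator;
 * every successful allocation must eventually be balanced by a free or the
 * logical XRI space leaks.
 *
 *	uint16_t xri = lpfc_sli4_alloc_xri(phba);
 *	if (xri != NO_XRI) {
 *		// ... tie an exchange to this xri ...
 *		lpfc_sli4_free_xri(phba, xri);
 *	}
 */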
/**
 * lpfc_sli4_post_els_sgl_list - post a block of ELS sgls to the port.
 * @phba: pointer to lpfc hba data structure.
 * @post_sgl_list: pointer to els sgl entry list.
 * @post_cnt: number of els sgl entries on the list.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
static int
lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba,
			    struct list_head *post_sgl_list,
			    int post_cnt)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	reqlen = phba->sli4_hba.els_xri_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Set up the SGL pages in the non-embedded DMA pages */
	viraddr = mbox->sge_array->addr[0];
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));

		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}

	/* Complete initialization and perform endian conversion. */
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, phba->sli4_hba.els_xri_cnt);
	sgl->word0 = cpu_to_le32(sgl->word0);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
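/*
 * Sizing example for the reqlen check above (struct sizes assumed from the
 * SLI4 definitions): each sgl_page_pairs entry is four words (16 bytes),
 * and the header overhead is sizeof(union lpfc_sli4_cfg_shdr) plus one
 * trailing word, so on a 4096-byte SLI4 page roughly (4096 - 12) / 16,
 * about 255 ELS XRIs, fit in one non-embedded command.
 */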
/**
 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @sblist: pointer to scsi buffer list.
 * @count: number of scsi buffers on the list.
 *
 * This routine is invoked to post a block of @count scsi sgl pages from a
 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 **/
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
			      struct list_head *sblist,
			      int count)
{
	struct lpfc_scsi_buf *psb;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = count * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0217 Block sgl registration required DMA "
				"size (%d) greater than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0283 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				LPFC_SLI4_MBX_NEMBED);
	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2561 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(psb, sblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = psb->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2564 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	/*  make rctl_names static to save stack space */
	static char *rctl_names[] = FC_RCTL_NAMES_INIT;
	char *type_names[] = FC_TYPE_NAMES_INIT;
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:	/* extended link services request */
	case FC_RCTL_ELS_REP:	/* extended link services reply */
	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:	/* basic link service NOP */
	case FC_RCTL_BA_ABTS:	/* basic link service abort */
	case FC_RCTL_BA_RMC:	/* remove connection */
	case FC_RCTL_BA_ACC:	/* basic accept */
	case FC_RCTL_BA_RJT:	/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:	/* acknowledge_1 */
	case FC_RCTL_ACK_0:	/* acknowledge_0 */
	case FC_RCTL_P_RJT:	/* port reject */
	case FC_RCTL_F_RJT:	/* fabric reject */
	case FC_RCTL_P_BSY:	/* port busy */
	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
	case FC_RCTL_LCR:	/* link credit reset */
	case FC_RCTL_END:	/* end */
		break;
	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:%s (x%x), type:%s (x%x), "
			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
			rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
			type_names[fc_hdr->fh_type], fc_hdr->fh_type,
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
			be32_to_cpu(header[6]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:%s type:%s\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type]);
	return 1;
}
/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if one exists
 * or 0 if no VSAN Header exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}
/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match frame to a
 * vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;
	uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
			fc_hdr->fh_d_id[1] << 8 |
			fc_hdr->fh_d_id[2]);

	if (did == Fabric_DID)
		return phba->pport;
	if ((phba->pport->fc_flag & FC_PT2PT) &&
	    !(phba->link_state == LPFC_HBA_READY))
		return phba->pport;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}
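
/*
 * Illustrative sketch, not part of the driver: how the 24-bit FC
 * destination ID is packed from the three fh_d_id bytes, exactly as
 * lpfc_fc_frame_to_vport() does above. The helper name is hypothetical.
 */
static inline uint32_t example_fc_did(const uint8_t d_id[3])
{
	/* d_id[0] = domain, d_id[1] = area, d_id[2] = port */
	return (d_id[0] << 16) | (d_id[1] << 8) | d_id[2];
}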
/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}
/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}
/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}
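
/*
 * Illustrative sketch, not part of the driver: the staleness test used by
 * lpfc_rcv_seq_check_edtov() above. A sequence has timed out once "now"
 * is past its time stamp plus E_D_TOV (milliseconds converted to jiffies);
 * time_before() copes with jiffies wrap-around. Names are hypothetical.
 */
static inline bool example_seq_timed_out(unsigned long time_stamp,
					 uint32_t edtov_ms)
{
	unsigned long deadline = time_stamp + msecs_to_jiffies(edtov_ms);

	return !time_before(jiffies, deadline);
}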
/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;
	uint8_t found = 0;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;

	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
	while (!found) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			found = 1;
			break;
		}

		if (&d_buf->list == &seq_dmabuf->dbuf.list)
			break;
		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
	}

	if (found)
		return seq_dmabuf;
	return NULL;
}
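
/*
 * Illustrative sketch, not part of the driver: two frames belong to the
 * same sequence when SEQ_ID, OX_ID and the 3-byte S_ID all match, which
 * is the comparison lpfc_fc_frame_add() (and the abort path below)
 * performs. The helper name is hypothetical.
 */
static inline bool example_same_sequence(const struct fc_frame_header *a,
					 const struct fc_frame_header *b)
{
	return (a->fh_seq_id == b->fh_seq_id) &&
	       (a->fh_ox_id == b->fh_ox_id) &&
	       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
}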
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described
 * by the information from the basic abort @dmabuf. It checks to see whether
 * such a partially assembled sequence is held by the driver. If so, it shall
 * free up all the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if there is matching partially assembled sequence present and all
 *          the frames freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}
/**
 * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the assembled sequence at the upper level
 * protocol, described by the information from the basic abort @dmabuf. It
 * checks to see whether such pending context exists at the upper level
 * protocol. If so, it shall clean up the pending context.
 *
 * Return
 * true  -- if there is matching pending context of the sequence cleaned
 *          at ulp;
 * false -- if there is no matching pending context of the sequence present
 *          at ulp.
 **/
static bool
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	int handled;

	/* Accepting abort at ulp with SLI4 only */
	if (phba->sli_rev < LPFC_SLI_REV4)
		return false;

	/* Register all caring upper level protocols to attend abort */
	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
	if (handled)
		return true;

	return false;
}
/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	struct lpfc_nodelist *ndlp;

	if (cmd_iocbq) {
		ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
		lpfc_nlp_put(ndlp);
		lpfc_nlp_not_used(ndlp);
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
	}

	/* Failure means BLS ABORT RSP did not get delivered to remote node */
	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"3154 BLS ABORT RSP failed, data: x%x/x%x\n",
			rsp_iocbq->iocb.ulpStatus,
			rsp_iocbq->iocb.un.ulpWord[4]);
}
/**
 * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
 * @phba: Pointer to HBA context object.
 * @xri: xri id in transaction.
 *
 * This function validates the xri maps to the known range of XRIs allocated
 * and used by the driver.
 **/
uint16_t
lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
		      uint16_t xri)
{
	uint16_t i;

	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
		if (xri == phba->sli4_hba.xri_ids[i])
			return i;
	}
	return NO_XRI;
}
/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @phba: Pointer to HBA context object.
 * @fc_hdr: pointer to a FC frame header.
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
static void
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
			struct fc_frame_header *fc_hdr, bool aborted)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid, xri, lxri;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(vport, sid);
	if (!ndlp) {
		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "1268 Failed to allocate ndlp for "
					 "oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
		lpfc_nlp_init(vport, ndlp, sid);
		/* Put ndlp onto pport node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
					 "3275 Failed to active ndlp found "
					 "for oxid:x%x SID:x%x\n", oxid, sid);
			return;
		}
	}

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	ctiocb->context1 = lpfc_nlp_get(ndlp);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_lxritag = NO_XRI;
	ctiocb->sli4_xritag = NO_XRI;

	if (fctl & FC_FC_EX_CTX)
		/* Exchange responder sent the abort so we
		 * own the oxid.
		 */
		xri = oxid;
	else
		xri = rxid;
	lxri = lpfc_sli4_xri_inrange(phba, xri);
	if (lxri != NO_XRI)
		lpfc_set_rrq_active(phba, ndlp, lxri,
				    (xri == oxid) ? rxid : oxid, 0);
	/* For BA_ABTS from exchange responder, if the logical xri with
	 * the oxid maps to the FCP XRI range, the port no longer has
	 * that exchange context, send a BLS_RJT. Override the IOCB for
	 * a BA_RJT.
	 */
	if ((fctl & FC_FC_EX_CTX) &&
	    (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	/* If BA_ABTS failed to abort a partially assembled receive sequence,
	 * the driver no longer has that exchange, send a BLS_RJT. Override
	 * the IOCB for a BA_RJT.
	 */
	if (aborted == false) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
	}
	bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			 icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
				 "2925 Failed to issue CT ABTS RSP x%x on "
				 "xri x%x, Data x%x\n",
				 icmd->un.xseq64.w5.hcsw.Rctl, oxid,
				 phba->link_state);
		lpfc_nlp_put(ndlp);
		ctiocb->context1 = NULL;
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}
/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per oxid status that the
 * unsolicited sequence has been aborted. After that, it will issue a basic
 * accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool aborted;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS by responder to exchange, no cleanup needed */
		aborted = true;
	} else {
		/* ABTS by initiator to exchange, need to do cleanup */
		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
		if (aborted == false)
			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
	}
	lpfc_in_buf_free(phba, &dmabuf->dbuf);

	/* Respond with BA_ACC or BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things: 1) that the first
 * frame has a sequence count of zero; 2) that there is a frame with the last
 * frame of sequence bit set; 3) that there are no holes in the sequence count.
 * The function will return 1 when the sequence is complete, otherwise it will
 * return 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}
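
/*
 * Illustrative sketch, not part of the driver: F_CTL is a 3-byte field,
 * so lpfc_seq_complete() packs it into a host integer before testing the
 * end-of-sequence bit. The helper name is hypothetical.
 */
static inline bool example_is_last_frame(const struct fc_frame_header *hdr)
{
	uint32_t fctl = (hdr->fh_f_ctl[0] << 16) |
			(hdr->fh_f_ctl[1] << 8) |
			hdr->fh_f_ctl[2];

	return (fctl & FC_FC_END_SEQ) != 0;
}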
/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that were not
 * able to be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct hbq_dmabuf *hbq_buf;
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	uint32_t len, tot_len;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;

		/* Check FC Header to see what TYPE of frame we are rcv'ing */
		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_ELS64_CX;
			first_iocbq->iocb.un.rcvels.parmRo =
				sli4_did_from_fc_hdr(fc_hdr);
			first_iocbq->iocb.ulpPU = PARM_NPIV_DID;
		} else
			first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = NO_XRI;
		first_iocbq->iocb.unsli3.rcvsli3.ox_id =
			be16_to_cpu(fc_hdr->fh_ox_id);
		/* iocbq is prepped for internal consumption. Physical vpi. */
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
			vport->phba->vpi_ids[vport->vpi];
		/* put the first buffer into the first IOCBq */
		tot_len = bf_get(lpfc_rcqe_length,
				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);

		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		if (tot_len > LPFC_DATA_BUF_SIZE)
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
				LPFC_DATA_BUF_SIZE;
		else
			first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;

		first_iocbq->iocb.un.rcvels.remoteID = sid;

		first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			pbde = (struct ulp_bde64 *)
				&iocbq->iocb.unsli3.sli3Words[4];
			if (len > LPFC_DATA_BUF_SIZE)
				pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			else
				pbde->tus.f.bdeSize = len;

			iocbq->iocb.unsli3.rcvsli3.acc_len += len;
			tot_len += len;
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
						IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
						IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			/* We need to get the size out of the right CQE */
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			len = bf_get(lpfc_rcqe_length,
				     &hbq_buf->cq_event.cqe.rcqe_cmpl);
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			if (len > LPFC_DATA_BUF_SIZE)
				iocbq->iocb.un.cont64[0].tus.f.bdeSize =
					LPFC_DATA_BUF_SIZE;
			else
				iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;

			tot_len += len;
			iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;

			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}
static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      &phba->sli.ring[LPFC_ELS_RING],
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}
/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the received buffers and gives them to the upper layers when a received
 * buffer indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers at interrupt contexts and adds
 * received dma buffers to the rb_pend_list queue and signals the worker
 * thread. The worker thread calls lpfc_sli4_handle_received_buffer, which
 * will call the appropriate receive function when the final frame in a
 * sequence is received.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;
	uint32_t did;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	if ((bf_get(lpfc_cqe_code,
		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);
	else
		fcfi = bf_get(lpfc_rcqe_fcf_id,
			      &dmabuf->cq_event.cqe.rcqe_cmpl);

	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
	if (!vport) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}

	/* d_id this frame is directed to */
	did = sli4_did_from_fc_hdr(fc_hdr);

	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
	    (did != Fabric_DID)) {
		/*
		 * Throw out the frame if we are not pt2pt.
		 * The pt2pt protocol allows for discovery frames
		 * to be received without a registered VPI.
		 */
		if (!(vport->fc_flag & FC_PT2PT) ||
		    (phba->link_state == LPFC_HBA_READY)) {
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
			return;
		}
	}

	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when the driver is
 * sequential.
 *
 * Return codes
 * 	0 - successful
 *	-EIO - The mailbox failed to complete successfully.
 * 	When this error occurs, the driver is not guaranteed
 *	to have any rpi regions posted to the device and
 *	must either attempt to repost the regions or take a
 *	fatal error.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;
	uint16_t lrpi = 0;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		/*
		 * Assign the rpi headers a physical rpi only if the driver
		 * has not initialized those resources. A port reset only
		 * needs the headers posted.
		 */
		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
		    LPFC_RPI_RSRC_RDY)
			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];

		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

 exit:
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
	       LPFC_RPI_RSRC_RDY);
	return rc;
}
/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec. This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* SLI4 ports that support extents do not require RPI headers. */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);

	/* Post the physical rpi to the port for this rpi header. */
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);

	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}
/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
 *
 * Returns
 * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	unsigned long rpi;
	uint16_t max_rpi, rpi_limit;
	uint16_t rpi_remaining, lrpi = 0;
	struct lpfc_rpi_hdr *rpi_hdr;
	unsigned long iflag;

	/*
	 * Fetch the next logical rpi. Because this index is logical,
	 * the driver starts at 0 each time.
	 */
	spin_lock_irqsave(&phba->hbalock, iflag);
	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_limit = phba->sli4_hba.next_rpi;

	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
	if (rpi >= rpi_limit)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0001 rpi:%x max:%x lim:%x\n",
			(int) rpi, max_rpi, rpi_limit);

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * RPI header postings are not required for SLI4 ports capable of
	 * extents.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now. Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lrpi = rpi_hdr->start_rpi;
			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
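
/*
 * Illustrative sketch, not part of the driver: the bitmap allocation
 * pattern lpfc_sli4_alloc_rpi() uses above -- find the first clear bit
 * below the current limit and claim it. The caller is assumed to hold
 * whatever lock protects the bitmap. Names are hypothetical.
 */
static inline unsigned long example_bmask_alloc(unsigned long *bmask,
						unsigned long limit)
{
	unsigned long id = find_next_zero_bit(bmask, limit, 0);

	if (id >= limit)
		return limit;	/* caller treats this as allocation failure */
	set_bit(id, bmask);
	return id;
}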
/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
static void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	}
}
/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove the memory region that
 * provided rpi via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
	kfree(phba->sli4_hba.rpi_ids);
	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
}
/**
 * lpfc_sli4_resume_rpi - Resume an rpi with the port
 * @ndlp: pointer to the node whose rpi is being resumed.
 *
 * This routine issues a RESUME_RPI mailbox command to resume I/O on the
 * rpi associated with @ndlp, invoking @cmpl (or the default mailbox
 * completion handler when @cmpl is NULL) on completion.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
	void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *arg)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Post all rpi memory regions to the port. */
	lpfc_resume_rpi(mboxq, ndlp);
	if (cmpl) {
		mboxq->mbox_cmpl = cmpl;
		mboxq->context1 = arg;
		mboxq->context2 = ndlp;
	} else
		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mboxq->vport = ndlp->vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *    0 success
 *    -Evalue otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}
/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
static void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
	    (shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2558 ADD_FCF_RECORD mailbox failed with "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}
/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record. The caller
 * must pass a completely initialized FCF_Record. This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0. This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data. The data starts after
	 * the FCoE header plus word10. The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}
/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record. The
 * values used are hardcoded. This routine handles memory initialization.
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
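
/*
 * Illustrative sketch, not part of the driver: addressing one VLAN id in
 * a byte-array bitmap as lpfc_sli4_build_dflt_fcf_record() does -- byte
 * index vlan_id / 8, bit within the byte vlan_id % 8. The routine above
 * records a single valid id, so it assigns rather than ORs. The helper
 * name is hypothetical.
 */
static inline void example_set_vlan_bit(uint8_t *vlan_bitmap, uint16_t vlan_id)
{
	vlan_bitmap[vlan_id / 8] |= 1 << (vlan_id % 8);
}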
/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
	return error;

fail_fcf_scan:
	if (mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	/* FCF scan failed, clear FCF_TS_INPROG flag */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);
	return error;
}
/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}
/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}
/**
 * lpfc_check_next_fcf_pri_level
 * @phba: pointer to the lpfc_hba struct for this port.
 * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
 * routine when the rr_bmask is empty. The FCF indices are put into the
 * rr_bmask based on their priority level, starting from the highest priority
 * to the lowest. The most likely FCF candidate will be in the highest
 * priority group. When this routine is called it searches the fcf_pri list
 * for the next lowest priority group and repopulates the rr_bmask with only
 * those fcf indexes at that priority level.
 * Returns:
 * 1 = success, 0 = failure
 **/
static int
lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
{
	uint16_t next_fcf_pri;
	uint16_t last_index;
	struct lpfc_fcf_pri *fcf_pri;
	int rc;
	int ret = 0;

	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
			LPFC_SLI4_FCF_TBL_INDX_MAX);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"3060 Last IDX %d\n", last_index);

	/* Verify the priority list has 2 or more entries */
	spin_lock_irq(&phba->hbalock);
	if (list_empty(&phba->fcf.fcf_pri_list) ||
	    list_is_singular(&phba->fcf.fcf_pri_list)) {
		spin_unlock_irq(&phba->hbalock);
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
			"3061 Last IDX %d\n", last_index);
		return 0; /* Empty rr list */
	}
	spin_unlock_irq(&phba->hbalock);

	next_fcf_pri = 0;
	/*
	 * Clear the rr_bmask and set all of the bits that are at this
	 * priority.
	 */
	memset(phba->fcf.fcf_rr_bmask, 0,
			sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
			continue;
		/*
		 * the 1st priority that has not FLOGI failed
		 * will be the highest.
		 */
		if (!next_fcf_pri)
			next_fcf_pri = fcf_pri->fcf_rec.priority;
		spin_unlock_irq(&phba->hbalock);
		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
			rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
			if (rc)
				return 0;
		}
		spin_lock_irq(&phba->hbalock);
	}
	/*
	 * if next_fcf_pri was not set above and the list is not empty then
	 * we have failed flogis on all of them. So reset flogi failed
	 * and start at the beginning.
	 */
	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
			/*
			 * the 1st priority that has not FLOGI failed
			 * will be the highest.
			 */
			if (!next_fcf_pri)
				next_fcf_pri = fcf_pri->fcf_rec.priority;
			spin_unlock_irq(&phba->hbalock);
			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
				rc = lpfc_sli4_fcf_rr_index_set(phba,
						fcf_pri->fcf_rec.fcf_index);
				if (rc)
					return 0;
			}
			spin_lock_irq(&phba->hbalock);
		}
	} else
		ret = 1;
	spin_unlock_irq(&phba->hbalock);

	return ret;
}
/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion. If the next eligible FCF record index equals the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

initial_priority:
	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = phba->fcf.current_rec.fcf_indx;

next_priority:
	/* Determine the next fcf index to check */
	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		/*
		 * If we have wrapped then we need to clear the bits that
		 * have been tested so that we can detect when we should
		 * change the priority level.
		 */
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
	}

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
	    next_fcf_index == phba->fcf.current_rec.fcf_indx) {
		/*
		 * If next fcf index is not found check if there are lower
		 * Priority level fcf's in the fcf_priority list.
		 * Set up the rr_bmask with all of the available fcf bits
		 * at that level and continue the selection process.
		 */
		if (lpfc_check_next_fcf_pri_level(phba))
			goto initial_priority;
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
			return LPFC_FCOE_FCF_NEXT_NONE;
		else {
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"3063 Only FCF available idx %d, flag %x\n",
				next_fcf_index,
				phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag);
			return next_fcf_index;
		}
	}

	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
	    phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
	    LPFC_FCF_FLOGI_FAILED) {
		if (list_is_singular(&phba->fcf.fcf_pri_list))
			return LPFC_FCOE_FCF_NEXT_NONE;

		goto next_priority;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
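
/*
 * Illustrative sketch, not part of the driver: round-robin selection over
 * a bitmap as in lpfc_sli4_fcf_rr_next_index_get() -- search from the bit
 * after the current index and wrap to bit 0 if the search runs off the
 * end. A result >= max means the bitmap is empty. Names are hypothetical.
 */
static inline unsigned long example_rr_next(unsigned long *bmask,
					    unsigned long max,
					    unsigned long curr)
{
	unsigned long next = find_next_bit(bmask, max, (curr + 1) % max);

	if (next >= max)	/* wrapped: retry from the beginning */
		next = find_next_bit(bmask, max, 0);
	return next;
}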
/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to set
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit successfully set, otherwise, it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}
/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to clear
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search. It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;

	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
				 list) {
		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
			list_del_init(&fcf_pri->list);
			break;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}
/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command. If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
static void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
			     &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}
/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
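
/*
 * Illustrative call pattern (hypothetical caller): the request is
 * issued with MBX_NOWAIT, so the issuer only checks for submit
 * failure; success or failure of the rediscovery itself is reported
 * to lpfc_mbx_cmpl_redisc_fcf_table() above.
 *
 *	if (lpfc_sli4_redisc_fcf_table(phba))
 *		lpfc_sli4_fcf_dead_failthrough(phba);
 */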
/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}
/**
 * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI3 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint32_t offset = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 failed to allocate mailbox memory\n");
		return 0;
	}
	mb = &pmb->u.mb;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 failed to read config "
					"region 23, rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	mempool_free(pmb, phba->mbox_mem_pool);
	return offset;
}
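
/*
 * The dump loop above pulls region 23 in chunks: each DUMP_MEMORY
 * iteration reports word_cnt, which is clamped so that no more than
 * DMP_RGN23_SIZE - offset is copied, and the loop terminates on a
 * zero count or once offset reaches DMP_RGN23_SIZE.
 */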
/**
 * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
 * @phba: pointer to lpfc hba data structure.
 * @rgn23_data: pointer to configure region 23 data.
 *
 * This function gets SLI4 port configure region 23 data through memory dump
 * mailbox command. When it successfully retrieves data, the size of the data
 * will be returned, otherwise, 0 will be returned.
 **/
static uint32_t
lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
	LPFC_MBOXQ_t *mboxq = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct lpfc_mqe *mqe;
	uint32_t data_length = 0;
	int rc;

	if (!rgn23_data)
		return 0;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3105 failed to allocate mailbox memory\n");
		return 0;
	}

	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
		goto out;
	mqe = &mboxq->u.mqe;
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc)
		goto out;
	data_length = mqe->un.mb_words[5];
	if (data_length == 0)
		goto out;
	if (data_length > DMP_RGN23_SIZE) {
		data_length = 0;
		goto out;
	}
	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	return data_length;
}
/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses the TLV for port status to
 * decide if the user disabled the port. If the TLV indicates the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	uint8_t *rgn23_data = NULL;
	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
	uint32_t offset = 0;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	if (phba->sli_rev < LPFC_SLI_REV4)
		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
	else {
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
			goto out;
		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
	}

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
			(tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}

out:
	kfree(rgn23_data);
	return;
}
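
/*
 * Region 23 layout, as implied by the parser above: a 4-byte signature,
 * a 4-byte version word, then TLV records in which byte 0 is the type,
 * byte 1 is the record length in 4-byte words, and the value follows.
 * For the driver-specific TLV, byte 2 carries the driver id; for the
 * PORT_STE sub-TLV, a zero in byte 2 means the user disabled the port.
 */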
/**
 * lpfc_wr_object - write an object to the firmware
 * @phba: HBA structure that indicates port to create a queue on.
 * @dmabuf_list: list of dmabufs to write to the port.
 * @size: the total byte value of the objects to write to the port.
 * @offset: the current offset to be used to start the transfer.
 *
 * This routine will create a wr_object mailbox command to send to the port.
 * The mailbox command will be constructed using the dma buffers described in
 * @dmabuf_list to create a list of BDEs. This routine will fill in as many
 * BDEs as the embedded mailbox can support. The @offset variable will be
 * used to indicate the starting offset of the transfer and will also return
 * the offset after the write object mailbox has completed. @size is used to
 * determine the end of the object and whether the eof bit should be set.
 *
 * Returns 0 if successful; @offset will contain the new offset to use
 * for the next write.
 * Returns a negative value on error.
 **/
int
lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
	       uint32_t size, uint32_t *offset)
{
	struct lpfc_mbx_wr_object *wr_object;
	LPFC_MBOXQ_t *mbox;
	int rc = 0, i = 0;
	uint32_t shdr_status, shdr_add_status;
	uint32_t mbox_tmo;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_dmabuf *dmabuf;
	uint32_t written = 0;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			LPFC_MBOX_OPCODE_WRITE_OBJECT,
			sizeof(struct lpfc_mbx_wr_object) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
	wr_object->u.request.write_offset = *offset;
	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
	wr_object->u.request.object_name[0] =
		cpu_to_le32(wr_object->u.request.object_name[0]);
	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
	list_for_each_entry(dmabuf, dmabuf_list, list) {
		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
			break;
		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
		wr_object->u.request.bde[i].addrHigh =
			putPaddrHigh(dmabuf->phys);
		if (written + SLI4_PAGE_SIZE >= size) {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				(size - written);
			written += (size - written);
			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
		} else {
			wr_object->u.request.bde[i].tus.f.bdeSize =
				SLI4_PAGE_SIZE;
			written += SLI4_PAGE_SIZE;
		}
		i++;
	}
	wr_object->u.request.bde_count = i;
	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &wr_object->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3025 Write Object mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	} else
		*offset += wr_object->u.response.actual_write_length;
	return rc;
}
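
/*
 * Worked example (illustrative, assuming SLI4_PAGE_SIZE is 4096): for
 * a 10000-byte object, the BDE loop above would post chunks of 4096,
 * 4096 and 1808 bytes, and only the final, partial BDE gets the eof
 * bit set, because written + SLI4_PAGE_SIZE >= size holds there.
 */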
/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport. It is called
 * when the driver wants to restart discovery of the vport due to a Clear
 * Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			(mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
			(mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
				(mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
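
/*
 * Note on the do/while above: clearing NLP_IGNR_REG_CMPL requires
 * dropping hbalock to take the shost lock, which opens a window in
 * which mboxq_cmpl may change; restart_loop therefore forces a rescan
 * of the completion list from the top after every such unlock.
 */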
/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit IOCBs on the txq
 * to the adapter. For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs. This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;
	uint32_t txq_cnt = 0;

	spin_lock_irqsave(&pring->ring_lock, iflags);
	list_for_each_entry(piocbq, &pring->txq, list) {
		txq_cnt++;
	}

	if (txq_cnt > pring->txq_max)
		pring->txq_max = txq_cnt;

	spin_unlock_irqrestore(&pring->ring_lock, iflags);

	while (!list_empty(&pring->txq)) {
		spin_lock_irqsave(&pring->ring_lock, iflags);

		piocbq = lpfc_sli_ringtx_get(phba, pring);
		if (!piocbq) {
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2823 txq empty and txq_cnt is %d\n ",
				txq_cnt);
			break;
		}
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&pring->ring_lock, iflags);
			break;
		}
		txq_cnt--;

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_lxritag = sglq->sli4_lxritag;
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
			fail_msg = NULL;
		}
		spin_unlock_irqrestore(&pring->ring_lock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
				IOERR_SLI_ABORTED);
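
	/*
	 * Everything moved to the local completions list above could not
	 * be issued; completing it with IOSTAT_LOCAL_REJECT and
	 * IOERR_SLI_ABORTED hands the deferred ELS IOCBs back to their
	 * owners for cleanup.
	 */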