/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2011 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/aer.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;
/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *,
							 struct lpfc_iocbq *);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);

static IOCB_t *
lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
{
	return &iocbq->iocb;
}
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
	union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
	bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
	readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */

	return 0;
}
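/*
 * Illustrative note (not driver logic): the WQ above is a single-producer
 * ring, and the "full" test deliberately leaves one slot unused. A sketch
 * of the same arithmetic:
 *
 *	next = (q->host_index + 1) % q->entry_count;
 *	if (next == q->hba_index)
 *		the ring is full and the caller sees -ENOMEM
 *
 * For example, with entry_count = 8, host_index = 6 and hba_index = 7 the
 * ring is full even though at most seven entries are outstanding.
 */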
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	uint32_t released = 0;

	if (q->hba_index == index)
		return 0;
	do {
		q->hba_index = ((q->hba_index + 1) % q->entry_count);
		released++;
	} while (q->hba_index != index);
	return released;
}
/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
	struct lpfc_register doorbell;
	uint32_t host_index;

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	host_index = q->host_index;
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
	return 0;
}
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}
/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;

	/* If the next EQE is not valid then we are done */
	if (!bf_get_le32(lpfc_eqe_valid, eqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return eqe;
}
/**
 * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
 * @q: The Event Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will mark all Event Queue Entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of EQEs that were popped.
 **/
uint32_t
lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_eqe *temp_eqe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_eqe = q->qe[q->host_index].eqe;
		bf_set_le32(lpfc_eqe_valid, temp_eqe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
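/*
 * Usage sketch (illustrative, not a verbatim caller): interrupt handling
 * code typically drains an EQ entry by entry and then pops the whole batch
 * back to the HBA with a single doorbell write, rearming on the final pass:
 *
 *	while ((eqe = lpfc_sli4_eq_get(eq)))
 *		... dispatch the event ...
 *	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
 *
 * Batching the release keeps doorbell MMIO writes off the per-entry path.
 */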
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* If the next CQE is not valid then we are done */
	if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
		return NULL;
	/* If the host has not yet processed the next entry then we are done */
	if (((q->hba_index + 1) % q->entry_count) == q->host_index)
		return NULL;

	cqe = q->qe[q->hba_index].cqe;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return cqe;
}
/**
 * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will mark all Completion queue entries on @q, from the last
 * known completed entry to the last entry that was processed, as completed
 * by clearing the valid bit for each completion queue entry. Then it will
 * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
 * The internal host index in the @q will be updated by this routine to indicate
 * that the host has finished processing the entries. The @arm parameter
 * indicates that the queue should be rearmed when ringing the doorbell.
 *
 * This function will return the number of CQEs that were released.
 **/
uint32_t
lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
{
	uint32_t released = 0;
	struct lpfc_cqe *temp_qe;
	struct lpfc_register doorbell;

	/* while there are valid entries */
	while (q->hba_index != q->host_index) {
		temp_qe = q->qe[q->host_index].cqe;
		bf_set_le32(lpfc_cqe_valid, temp_qe, 0);
		released++;
		q->host_index = ((q->host_index + 1) % q->entry_count);
	}
	if (unlikely(released == 0 && !arm))
		return 0;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
	return released;
}
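/*
 * Usage sketch (illustrative): completion queues are drained the same way
 * as event queues; a caller may release entries mid-loop without rearming
 * (LPFC_QUEUE_NOARM) and rearm only once the CQ is empty:
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq)))
 *		... process the completion ...
 *	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
 */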
/**
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 * @hrqe: The Header Receive Queue Entry to put on the header queue.
 * @drqe: The Data Receive Queue Entry to put on the data queue.
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on @hq and @dq. This function will then ring the Receive Queue
 * Doorbell to signal the HBA to start processing the Receive Queue Entry.
 * This function returns the index that the rqe was copied to if successful.
 * If no entries are available on @hq then this function will return -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
	struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
	struct lpfc_register doorbell;
	int put_index = hq->host_index;

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq->host_index != dq->host_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq->host_index + 1) % hq->entry_count);
	dq->host_index = ((dq->host_index + 1) % dq->entry_count);

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
		doorbell.word0 = 0;
		bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
		       LPFC_RQ_POST_BATCH);
		bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
		writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
	}
	return put_index;
}
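/*
 * Usage sketch (illustrative): receive buffers are always posted as a
 * header/data pair, one RQE for each half of an hbq_dmabuf; see
 * lpfc_sli_hbq_to_firmware_s4() later in this file for the real caller:
 *
 *	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
 *	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hdr_rq, dat_rq, &hrqe, &drqe);
 *
 * A non-negative rc (the put index) is then used as the buffer tag.
 */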
/**
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 * @hq: The Header Receive Queue to operate on.
 * @dq: The Data Receive Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}
/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}
/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}
/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq * iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}
/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;
	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
	return sglq;
}
/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
static struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	uint16_t adj_xri;
	struct lpfc_sglq *sglq;
	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
		return NULL;
	sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
	return sglq;
}
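/*
 * Illustrative note: both sglq lookups above map an absolute XRI tag to an
 * array slot by subtracting the port's XRI base, so with xri_base = 0x100
 * an xritag of 0x105 lands in lpfc_sglq_active_list[5]. A tag below the
 * base wraps the uint16_t subtraction to a large value and is rejected by
 * the max_xri bound check.
 */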
/**
 * __lpfc_set_rrq_active - set RRQ active bit in the ndlp's xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function is called with hbalock held.
 * The active bit is set in the ndlp's active rrq xri_bitmap. Allocates an
 * rrq struct and adds it to the active_rrq_list.
 *
 * returns  0 if the rrq was set active for this xri.
 *         < 0 if unable to get rrq memory or a parameter was invalid.
 **/
static int
__lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		uint16_t xritag,
		uint16_t rxid, uint16_t send_rrq)
{
	uint16_t adj_xri;
	struct lpfc_node_rrq *rrq;
	int empty;
	uint32_t did = 0;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	if (phba->pport->load_flag & FC_UNLOADING) {
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
		goto out;
	}
	did = ndlp->nlp_DID;

	/*
	 * set the active bit even if there is no mem available.
	 */
	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;

	if (NLP_CHK_FREE_REQ(ndlp))
		goto out;

	if (ndlp->vport && (ndlp->vport->load_flag & FC_UNLOADING))
		goto out;

	if (test_and_set_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
		goto out;

	rrq = mempool_alloc(phba->rrq_pool, GFP_KERNEL);
	if (rrq) {
		rrq->send_rrq = send_rrq;
		rrq->xritag = xritag;
		rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1);
		rrq->ndlp = ndlp;
		rrq->nlp_DID = ndlp->nlp_DID;
		rrq->vport = ndlp->vport;
		rrq->rxid = rxid;
		empty = list_empty(&phba->active_rrq_list);
		rrq->send_rrq = send_rrq;
		list_add_tail(&rrq->list, &phba->active_rrq_list);
		if (!(phba->hba_flag & HBA_RRQ_ACTIVE)) {
			phba->hba_flag |= HBA_RRQ_ACTIVE;
			if (empty)
				lpfc_worker_wake_up(phba);
		}
		return 0;
	}
out:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, did, send_rrq);
	return -EINVAL;
}
/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	uint16_t adj_xri;
	struct lpfc_nodelist *ndlp = NULL;

	if ((rrq->vport) && NLP_CHK_NODE_ACT(rrq->ndlp))
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	/* The target DID could have been swapped (cable swap)
	 * we should use the ndlp from the findnode if it is
	 * available.
	 */
	if ((!ndlp) && rrq->ndlp)
		ndlp = rrq->ndlp;

	if (!ndlp)
		goto out;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (test_and_clear_bit(adj_xri, ndlp->active_rrqs.xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}
/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov + 1);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq)
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return NULL;
}
/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_node_list structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL, then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list)
		if ((rrq->vport == vport) && (!ndlp || rrq->ndlp == ndlp))
			list_move(&rrq->list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}
/**
 * lpfc_cleanup_wt_rrqs - Remove all rrqs from the active list.
 * @phba: Pointer to HBA context object.
 *
 * Remove all rrqs from the phba->active_rrq_list and free them by
 * calling lpfc_clr_rrq_active.
 *
 **/
void
lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	spin_lock_irqsave(&phba->hbalock, iflags);
	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	next_time = jiffies + HZ * (phba->fc_ratov * 2);
	list_splice_init(&phba->active_rrq_list, &rrq_list);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
	if (!list_empty(&phba->active_rrq_list))
		mod_timer(&phba->rrq_tmr, next_time);
}
/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function is called with hbalock held. This function
 * returns 0 = rrq not active for this xri
 *         1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			uint16_t xritag)
{
	uint16_t adj_xri;

	adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
	if (!ndlp)
		return 0;
	if (test_bit(adj_xri, ndlp->active_rrqs.xri_bitmap))
		return 1;
	else
		return 0;
}
/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns  0 if the rrq was activated for this xri.
 *         < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	int ret;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return ret;
}
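/*
 * Note (illustrative): this is the file's standard locked/unlocked pairing.
 * The double-underscore routine assumes the hbalock is already held, and
 * the public wrapper only brackets it with the lock:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	ret = __lpfc_set_rrq_active(phba, ndlp, xritag, rxid, send_rrq);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *
 * Callers that already hold the hbalock must use the __ variant to avoid
 * deadlocking on the spinlock.
 */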
/**
 * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with hbalock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, and it returns pointer to the
 * newly allocated sglq object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	uint16_t adj_xri;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;

	if (piocbq->iocb_flag & LPFC_IO_FCP) {
		lpfc_cmd = (struct lpfc_scsi_buf *) piocbq->context1;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->iocb_flag & LPFC_IO_LIBDFC))
		ndlp = piocbq->context_un.ndlp;
	else
		ndlp = piocbq->context1;

	list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			return NULL;
		adj_xri = sglq->sli4_xritag -
				phba->sli4_hba.max_cfg_param.xri_base;
		if (lpfc_test_rrq_active(phba, ndlp, sglq->sli4_xritag)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	return sglq;
}
/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq * iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}
/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_sgl_list).
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
	if (sglq) {
		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
		    (sglq->state != SGL_XRI_ABORTED)) {
			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
					  iflag);
			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.abts_sgl_list_lock, iflag);
		} else {
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_sgl_list);

			/* Check if TXQ queue needs to be serviced */
			if (pring->txq_cnt)
				lpfc_worker_wake_up(phba);
		}
	}

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}
/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}
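/*
 * Note (illustrative): phba->__lpfc_sli_release_iocbq is a per-SLI-rev
 * function pointer, assumed to be set during the driver's API setup to one
 * of the _s3/_s4 routines above, e.g.:
 *
 *	phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
 *
 * so that common code can free an iocb without branching on sli_rev at
 * every call site.
 */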
/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}
/**
 * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
 * @phba: Pointer to HBA context object.
 * @iocblist: List of IOCBs.
 * @ulpstatus: ULP status in IOCB command field.
 * @ulpWord4: ULP word-4 in IOCB command field.
 *
 * This function is called with a list of IOCBs to cancel. It cancels the IOCB
 * on the list by invoking the complete callback function associated with the
 * IOCB with the provided @ulpstatus and @ulpWord4 set to the IOCB command
 * fields.
 **/
void
lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
		      uint32_t ulpstatus, uint32_t ulpWord4)
{
	struct lpfc_iocbq *piocb;

	while (!list_empty(iocblist)) {
		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);

		if (!piocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, piocb);
		else {
			piocb->iocb.ulpStatus = ulpstatus;
			piocb->iocb.un.ulpWord[4] = ulpWord4;
			(piocb->iocb_cmpl) (phba, piocb, piocb);
		}
	}
	return;
}
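/*
 * Usage sketch (illustrative): callers typically splice a ring's txq and
 * txcmplq onto a private list under the hbalock, drop the lock, and then
 * fail everything in one call:
 *
 *	LIST_HEAD(completions);
 *	... move iocbs onto completions under the hbalock ...
 *	lpfc_sli_cancel_iocbs(phba, &completions,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 *
 * IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED is the status pair this driver
 * uses elsewhere for aborted rings.
 */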
/**
 * lpfc_sli_iocb_cmd_type - Get the iocb type
 * @iocb_cmnd: iocb command code.
 *
 * This function is called by ring event handler function to get the iocb type.
 * This function translates the iocb command to an iocb command type used to
 * decide the final disposition of each completed IOCB.
 * The function returns
 * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
 * LPFC_SOL_IOCB     if it is a solicited iocb completion
 * LPFC_ABORT_IOCB   if it is an abort iocb
 * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
 *
 * The caller is not required to hold any lock.
 **/
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return 0;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
	case DSSCMD_IWRITE64_CR:
	case DSSCMD_IWRITE64_CX:
	case DSSCMD_IREAD64_CR:
	case DSSCMD_IREAD64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
	case CMD_XMIT_BLS_RSP64_CX:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_ASYNC_STATUS:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
	case CMD_IOCB_RET_XRI64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	case CMD_IOCB_XMIT_MSEQ64_CR:
	case CMD_IOCB_XMIT_MSEQ64_CX:
	case CMD_IOCB_RCV_SEQ_LIST64_CX:
	case CMD_IOCB_RCV_ELS_LIST64_CX:
	case CMD_IOCB_CLOSE_EXTENDED_CN:
	case CMD_IOCB_ABORT_EXTENDED_CN:
	case CMD_IOCB_RET_HBQE64_CN:
	case CMD_IOCB_FCP_IBIDIR64_CR:
	case CMD_IOCB_FCP_IBIDIR64_CX:
	case CMD_IOCB_FCP_ITASKMGT64_CX:
	case CMD_IOCB_LOGENTRY_CN:
	case CMD_IOCB_LOGENTRY_ASYNC_CN:
		printk("%s - Unhandled SLI-3 Command x%x\n",
				__func__, iocb_cmnd);
		type = LPFC_UNKNOWN_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
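/*
 * Usage sketch (illustrative): the ring event handlers in this file mask
 * the command code out of the response iocb and dispatch on the type
 * returned here:
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	switch (type) {
 *	case LPFC_SOL_IOCB:	... match against the txcmplq ...
 *	case LPFC_UNSOL_IOCB:	... hand the sequence to the ULP ...
 *	}
 */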
/**
 * lpfc_sli_ring_map - Issue config_ring mbox for all rings
 * @phba: Pointer to HBA context object.
 *
 * This function is called from SLI initialization code
 * to configure every ring of the HBA's SLI interface. The
 * caller is not required to hold any lock. This function issues
 * a config_ring mailbox command for each ring.
 * This function returns zero if successful else returns a negative
 * error code.
 **/
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (pmb == NULL)
		return -ENOMEM;
	pmbox = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}
/**
 * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to the driver iocb object.
 *
 * This function is called with hbalock held. The function adds the
 * new iocb to txcmplq of the given ring. This function always returns
 * 0. If this function is called for ELS ring, this function checks if
 * there is a vport associated with the ELS command. This function also
 * starts els_tmofunc timer if this is an ELS command.
 **/
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	piocb->iocb_flag |= LPFC_IO_ON_Q;
	pring->txcmplq_cnt++;
	if (pring->txcmplq_cnt > pring->txcmplq_max)
		pring->txcmplq_max = pring->txcmplq_cnt;

	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
	    (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	    (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}

	return 0;
}
/**
 * lpfc_sli_ringtx_get - Get first element of the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to get next
 * iocb in txq of the given ring. If there is any iocb in
 * the txq, the function returns first iocb in the list after
 * removing the iocb from the list, else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}
/**
 * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held and the caller must post the
 * iocb without releasing the lock. If the caller releases the lock,
 * iocb slot returned by the function is not guaranteed to be available.
 * The function returns pointer to the next available iocb slot if there
 * is available slot in the ring, else it returns NULL.
 * If the get index of the ring is ahead of the put index, the function
 * will post an error attention event to the worker thread to take the
 * HBA to offline state.
 **/
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	uint32_t  max_cmd_idx = pring->numCiocb;
	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}
/**
 * lpfc_sli_next_iotag - Get an iotag for the iocb
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function gets an iotag for the iocb. If there is no unused iotag and
 * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
 * array and assigns a new iotag.
 * The function returns the allocated iotag if successful, else returns zero.
 * Zero is not a valid iotag.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof (struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}
/**
 * lpfc_sli_submit_iocb - Submit an iocb to the firmware
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocb: Pointer to iocb slot in the ring.
 * @nextiocb: Pointer to driver iocb object which needs to be
 *            posted to firmware.
 *
 * This function is called with hbalock held to post a new iocb to
 * the firmware. This function copies the new iocb to ring iocb slot and
 * updates the ring pointers. It adds the new iocb to txcmplq if there is
 * a completion call back for this iocb else the function will free the
 * iocb object.
 **/
*phba
, struct lpfc_sli_ring
*pring
,
1432 IOCB_t
*iocb
, struct lpfc_iocbq
*nextiocb
)
1437 nextiocb
->iocb
.ulpIoTag
= (nextiocb
->iocb_cmpl
) ? nextiocb
->iotag
: 0;
1440 if (pring
->ringno
== LPFC_ELS_RING
) {
1441 lpfc_debugfs_slow_ring_trc(phba
,
1442 "IOCB cmd ring: wd4:x%08x wd6:x%08x wd7:x%08x",
1443 *(((uint32_t *) &nextiocb
->iocb
) + 4),
1444 *(((uint32_t *) &nextiocb
->iocb
) + 6),
1445 *(((uint32_t *) &nextiocb
->iocb
) + 7));
1449 * Issue iocb command to adapter
1451 lpfc_sli_pcimem_bcopy(&nextiocb
->iocb
, iocb
, phba
->iocb_cmd_size
);
1453 pring
->stats
.iocb_cmd
++;
1456 * If there is no completion routine to call, we can release the
1457 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
1458 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
1460 if (nextiocb
->iocb_cmpl
)
1461 lpfc_sli_ringtxcmpl_put(phba
, pring
, nextiocb
);
1463 __lpfc_sli_release_iocbq(phba
, nextiocb
);
1466 * Let the HBA know what IOCB slot will be the next one the
1467 * driver will put a command into.
1469 pring
->cmdidx
= pring
->next_cmdidx
;
1470 writel(pring
->cmdidx
, &phba
->host_gp
[pring
->ringno
].cmdPutInx
);
/**
 * lpfc_sli_update_full_ring - Update the chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * The caller is not required to hold any lock for calling this function.
 * This function updates the chip attention bits for the ring to inform firmware
 * that there is pending work to be done for this ring and requests an
 * interrupt when there is space available in the ring. This function is
 * called when the driver is unable to post more iocbs to the ring due
 * to unavailability of space in the ring.
 **/
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
/**
 * lpfc_sli_update_ring - Update chip attention register
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function updates the chip attention register bit for the
 * given ring to inform HBA that there is more work to be done
 * in this ring. The caller is not required to hold any lock.
 **/
static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
		wmb();
		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
}
/**
 * lpfc_sli_resume_iocb - Process iocbs in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called with hbalock held to post pending iocbs
 * in the txq to the firmware. This function is called when driver
 * detects space available in the ring.
 **/
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    lpfc_is_link_up(phba) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
/**
 * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 *
 * This function is called with hbalock held to get the next
 * available slot for the given HBQ. If there is free slot
 * available for the HBQ it will return pointer to the next available
 * HBQ entry else it will return NULL.
 **/
static struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx "
					"%u is > than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}
/**
 * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held to free all the
 * hbq buffers while uninitializing the SLI interface. It also
 * frees the HBQ buffers returned by the firmware but not yet
 * processed by the upper layers.
 **/
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	unsigned long flags;
	int i, hbq_count;
	uint32_t hbqno;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
		phba->hbqs[i].buffer_count = 0;
	}
	/* Return all HBQ buffer that are in-fly */
	list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
				 list) {
		hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
		list_del(&hbq_buf->dbuf.list);
		if (hbq_buf->tag == -1) {
			(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
				(phba, hbq_buf);
		} else {
			hbqno = hbq_buf->tag >> 16;
			if (hbqno >= LPFC_MAX_HBQS)
				(phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer)
					(phba, hbq_buf);
			else
				(phba->hbqs[hbqno].hbq_free_buffer)(phba,
					hbq_buf);
		}
	}

	/* Mark the HBQs not in use */
	phba->hbq_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);
}
/**
 * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a
 * hbq buffer to the firmware. If the function finds an empty
 * slot in the HBQ, it will post the buffer. The function will return
 * zero if it successfully posts the buffer, else it will return
 * an error.
 **/
static int
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
}
/**
 * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post a hbq buffer to the
 * firmware. If the function finds an empty slot in the HBQ, it will post the
 * buffer and place it on the hbq_buffer_list. The function will return zero if
 * it successfully posts the buffer else it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
				/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
				/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
		return 0;
	} else
		return -ENOMEM;
}
/**
 * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @hbq_buf: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held to post an RQE to the SLI4
 * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
 * the hbq_buffer_list and return zero, otherwise it will return an error.
 **/
static int
lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
			    struct hbq_dmabuf *hbq_buf)
{
	int rc;
	struct lpfc_rqe hrqe;
	struct lpfc_rqe drqe;

	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
	rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			      &hrqe, &drqe);
	if (rc < 0)
		return rc;
	hbq_buf->tag = rc;
	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
	return 0;
}
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 256,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 40,
	.add_count = 40,
};

/* HBQ for the extra ring if needed */
static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};

/* Array of HBQs */
struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};
/**
 * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
 * @phba: Pointer to HBA context object.
 * @hbqno: HBQ number.
 * @count: Number of HBQ buffers to be posted.
 *
 * This function is called with no lock held to post more hbq buffers to the
 * given HBQ. The function returns the number of HBQ buffers successfully
 * posted.
 **/
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, posted = 0;
	unsigned long flags;
	struct hbq_dmabuf *hbq_buffer;
	LIST_HEAD(hbq_buf_list);
	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	if ((phba->hbqs[hbqno].buffer_count + count) >
	    lpfc_hbq_defs[hbqno]->entry_count)
		count = lpfc_hbq_defs[hbqno]->entry_count -
					phba->hbqs[hbqno].buffer_count;
	if (!count)
		return 0;
	/* Allocate HBQ entries */
	for (i = 0; i < count; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			break;
		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
	}
	/* Check whether HBQ is still in use */
	spin_lock_irqsave(&phba->hbalock, flags);
	if (!phba->hbq_in_use)
		goto err;
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
				      (hbqno << 16));
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
			phba->hbqs[hbqno].buffer_count++;
			posted++;
		} else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	spin_unlock_irqrestore(&phba->hbalock, flags);
	return posted;
err:
	spin_unlock_irqrestore(&phba->hbalock, flags);
	while (!list_empty(&hbq_buf_list)) {
		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
				 dbuf.list);
		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}
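/*
 * Illustrative note: the tag stamped on each buffer above packs the HBQ
 * number and the buffer index into one 32-bit value:
 *
 *	tag   = buffer_count | (hbqno << 16);
 *	hbqno = tag >> 16;
 *
 * lpfc_sli_hbqbuf_find() and lpfc_sli_free_hbq() below rely on this layout
 * to recover the owning HBQ from a returned buffer.
 */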
/**
 * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
 * @phba: Pointer to HBA context object.
 * @qno: HBQ number.
 *
 * This function posts more buffers to the HBQ. This function
 * is called with no lock held. The function returns the number of HBQ entries
 * successfully allocated.
 **/
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return 0;
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->add_count);
}
/**
 * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
 * @phba: Pointer to HBA context object.
 * @qno: HBQ queue number.
 *
 * This function is called from SLI initialization code path with
 * no lock held to post initial HBQ buffers to firmware. The
 * function returns the number of HBQ entries successfully allocated.
 **/
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->entry_count);
	else
		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					lpfc_hbq_defs[qno]->init_count);
}
/**
 * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
 * @rb_list: Pointer to the driver's hbq buffer list.
 *
 * This function removes the first hbq buffer on an hbq list and returns a
 * pointer to that buffer. If it finds no buffers on the list it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_get(struct list_head *rb_list)
{
	struct lpfc_dmabuf *d_buf;

	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
	if (!d_buf)
		return NULL;
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}
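/*
 * struct hbq_dmabuf embeds a struct lpfc_dmabuf as its "dbuf" member, so
 * lists carry lpfc_dmabuf pointers and container_of() walks back to the
 * enclosing hbq_dmabuf. Illustrative sketch (hypothetical helper):
 */
static inline struct hbq_dmabuf *
lpfc_example_dbuf_to_hbq_buf(struct lpfc_dmabuf *d_buf)
{
	/* recover the containing structure from the embedded member */
	return container_of(d_buf, struct hbq_dmabuf, dbuf);
}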
/**
 * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
 * @phba: Pointer to HBA context object.
 * @tag: Tag of the hbq buffer.
 *
 * This function searches for the hbq buffer associated with the given tag in
 * the hbq buffer list, taking and releasing the hbalock internally. If it
 * finds the hbq buffer, it returns the hbq_buffer, otherwise it returns NULL.
 **/
static struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag) {
			spin_unlock_irq(&phba->hbalock);
			return hbq_buf;
		}
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, phba->hbqs[tag >> 16].buffer_count);
	return NULL;
}
/**
 * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
 * @phba: Pointer to HBA context object.
 * @hbq_buffer: Pointer to HBQ buffer.
 *
 * This function is called with the hbalock held. It gives back
 * the hbq buffer to firmware. If the HBQ does not have space to
 * post the buffer, it will free the buffer.
 **/
void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}
/**
 * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
 * @mbxCommand: mailbox command code.
 *
 * This function is called by the mailbox event handler function to verify
 * that the completed mailbox command is a legitimate mailbox command. If the
 * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
 * and the mailbox event handler will take the HBA offline.
 **/
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_WRITE_VPARMS:
	case MBX_RUN_BIU_DIAG:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_LNK_STAT:
	case MBX_UNREG_LOGIN:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_UPDATE_CFG:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_VARIABLE:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_TOPOLOGY:
	case MBX_LOAD_EXP_ROM:
	case MBX_ASYNCEVT_ENABLE:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_SLI4_CONFIG:
	case MBX_SLI4_REQ_FTRS:
	case MBX_UNREG_FCFI:
	case MBX_RESUME_RPI:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_READ_EVENT_LOG:
	case MBX_SECURITY_MGMT:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
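/*
 * A caller-side sketch of the whitelist above: anything not in the switch
 * comes back as MBX_SHUTDOWN and is treated as fatal by the mailbox event
 * handler (hypothetical helper, mirroring the check in
 * lpfc_sli_handle_mb_event below):
 */
static inline int
lpfc_example_mbx_cmd_is_known(uint8_t mbxCommand)
{
	return lpfc_sli_chk_mbx_command(mbxCommand) != MBX_SHUTDOWN;
}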
/**
 * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is completion handler function for mailbox commands issued from
 * lpfc_sli_issue_mbox_wait function. This function is called by the
 * mailbox event handler function with no lock held. This function
 * will wake up the thread waiting on the wait queue pointed to by context1
 * of the mailbox.
 **/
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}
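/*
 * The waiting side of this handshake (lpfc_sli_issue_mbox_wait) parks on
 * the wait queue stored in context1 and re-checks LPFC_MBX_WAKE, so a
 * wakeup that races with a timeout is not lost. Hedged sketch of that
 * pattern under those assumptions -- not the driver's exact code:
 */
static inline int
lpfc_example_wait_for_mbox(LPFC_MBOXQ_t *pmboxq, unsigned long tmo_jiffies)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);

	pmboxq->context1 = &done_q;	/* completion handler wakes this */
	wait_event_interruptible_timeout(done_q,
					 pmboxq->mbox_flag & LPFC_MBX_WAKE,
					 tmo_jiffies);
	return (pmboxq->mbox_flag & LPFC_MBX_WAKE) ? 0 : -ETIMEDOUT;
}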
/**
 * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function is the default mailbox completion handler. It
 * frees the memory resources associated with the completed mailbox
 * command. If the completed command is a REG_LOGIN mailbox command,
 * this function will issue a UREG_LOGIN to re-claim the RPI.
 **/
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport  *vport = pmb->vport;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	uint16_t rpi, vpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after node is destroyed or node
	 * is in re-discovery driver need to cleanup the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->u.mb.mbxStatus) {
		rpi = pmb->u.mb.un.varWords[0];
		vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
		lpfc_unreg_login(phba, vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
	    !(phba->pport->load_flag & FC_UNLOADING) &&
	    !pmb->u.mb.mbxStatus) {
		shost = lpfc_shost_from_vport(vport);
		spin_lock_irq(shost->host_lock);
		vport->vpi_state |= LPFC_VPI_REGISTERED;
		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
		spin_unlock_irq(shost->host_lock);
	}

	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
		ndlp = (struct lpfc_nodelist *)pmb->context2;
		lpfc_nlp_put(ndlp);
		pmb->context2 = NULL;
	}

	/* Check security permission status on INIT_LINK mailbox command */
	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"2860 SLI authentication is required "
				"for INIT_LINK but has not been done yet\n");

	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
		lpfc_sli4_mbox_cmd_free(phba, pmb);
	else
		mempool_free(pmb, phba->mbox_mem_pool);
}
/**
 * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function processes all
 * the completed mailbox commands and gives them to upper layers. The interrupt
 * service routine processes mailbox completion interrupt and adds completed
 * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
 * The worker thread calls lpfc_sli_handle_mb_event, which will return the
 * completed mailbox commands in the mboxq_cmpl queue to the upper layers. This
 * function returns the mailbox commands to the upper layer by calling the
 * completion handler function of each mailbox.
 **/
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->u.mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			} else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"x%x (x%x) Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand,
					lpfc_sli4_mbox_opcode_get(phba, pmb));
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_SLI,
						"(%d):0305 Mbox cmd cmpl "
						"error - RETRYing Data: x%x "
						"(x%x) x%x x%x x%x\n",
						pmb->vport ? pmb->vport->vpi : 0,
						pmbox->mbxCommand,
						lpfc_sli4_mbox_opcode_get(phba,
									  pmb),
						pmbox->mbxStatus,
						pmbox->un.varWords[0],
						pmb->vport->port_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc != MBX_NOT_FINISHED)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, pmb),
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}
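/*
 * The handler above drains a private list spliced from mboxq_cmpl under
 * the lock, then processes entries without holding it. Sketch of the
 * splice-and-drain idiom in isolation (hypothetical helper):
 */
static inline void
lpfc_example_splice_and_drain(struct lpfc_hba *phba, struct list_head *src)
{
	LIST_HEAD(local_q);

	spin_lock_irq(&phba->hbalock);
	list_splice_init(src, &local_q);	/* grab everything at once */
	spin_unlock_irq(&phba->hbalock);
	/* entries on local_q can now be walked without holding hbalock */
}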
/**
 * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: buffer tag.
 *
 * This function is called with no lock held. When the QUE_BUFTAG_BIT bit
 * is set in the tag, the buffer is posted for a particular exchange and
 * the function will return the buffer without replacing the buffer.
 * If the buffer is for unsolicited ELS or CT traffic, this function
 * returns the buffer and also posts another buffer to the firmware.
 **/
static struct lpfc_dmabuf *
lpfc_sli_get_buff(struct lpfc_hba *phba,
		  struct lpfc_sli_ring *pring,
		  uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry;

	if (tag & QUE_BUFTAG_BIT)
		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (!hbq_entry)
		return NULL;
	return &hbq_entry->dbuf;
}
/**
 * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
 * @fch_r_ctl: the r_ctl for the first frame of the sequence.
 * @fch_type: the type for the first frame of the sequence.
 *
 * This function is called with no lock held. This function uses the r_ctl and
 * type of the received sequence to find the correct callback function to call
 * to process the sequence.
 **/
static int
lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
			 uint32_t fch_type)
{
	int i;

	/* unSolicited Responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
								  saveq);
		return 1;
	}
	/* We must search, based on rctl / type
	   for the right routine */
	for (i = 0; i < pring->num_mask; i++) {
		if ((pring->prt[i].rctl == fch_r_ctl) &&
		    (pring->prt[i].type == fch_type)) {
			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
					(phba, pring, saveq);
			return 1;
		}
	}
	return 0;
}
/**
 * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the unsolicited iocb.
 *
 * This function is called with no lock held by the ring event handler
 * when there is an unsolicited iocb posted to the response ring by the
 * firmware. This function gets the buffer associated with the iocbs
 * and calls the event handler for the ring. This function handles both
 * qring buffers and hbq buffers.
 * When the function returns 1 the caller can free the iocb object, otherwise
 * upper layer functions will free the iocb objects.
 **/
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	struct lpfc_iocbq *iocbq;
	struct lpfc_dmabuf *dmzbuf;

	irsp = &(saveq->iocb);

	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
		if (pring->lpfc_sli_rcv_async_status)
			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
		else
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"0316 Ring %d handler: unexpected "
					"ASYNC_STATUS iocb received evt_code "
					"0x%x\n",
					pring->ringno,
					irsp->un.asyncstat.evt_code);
		return 1;
	}

	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
		if (irsp->ulpBdeCount > 0) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->un.ulpWord[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}
		if (irsp->ulpBdeCount > 1) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[3]);
			lpfc_in_buf_free(phba, dmzbuf);
		}
		if (irsp->ulpBdeCount > 2) {
			dmzbuf = lpfc_sli_get_buff(phba, pring,
						   irsp->unsli3.sli3Words[7]);
			lpfc_in_buf_free(phba, dmzbuf);
		}
		return 1;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0) {
			saveq->context2 = lpfc_sli_get_buff(phba, pring,
						irsp->un.ulpWord[3]);
			if (!saveq->context2)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0341 Ring %d Cannot find buffer for "
					"an unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->un.ulpWord[3]);
		}
		if (irsp->ulpBdeCount == 2) {
			saveq->context3 = lpfc_sli_get_buff(phba, pring,
						irsp->unsli3.sli3Words[7]);
			if (!saveq->context3)
				lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"0342 Ring %d Cannot find buffer for an"
					" unsolicited iocb. tag 0x%x\n",
					pring->ringno,
					irsp->unsli3.sli3Words[7]);
		}
		list_for_each_entry(iocbq, &saveq->list, list) {
			irsp = &(iocbq->iocb);
			if (irsp->ulpBdeCount != 0) {
				iocbq->context2 = lpfc_sli_get_buff(phba,
							pring,
							irsp->un.ulpWord[3]);
				if (!iocbq->context2)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0343 Ring %d Cannot find "
						"buffer for an unsolicited iocb"
						". tag 0x%x\n", pring->ringno,
						irsp->un.ulpWord[3]);
			}
			if (irsp->ulpBdeCount == 2) {
				iocbq->context3 = lpfc_sli_get_buff(phba,
						pring,
						irsp->unsli3.sli3Words[7]);
				if (!iocbq->context3)
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"0344 Ring %d Cannot find "
						"buffer for an unsolicited "
						"iocb. tag 0x%x\n",
						pring->ringno,
						irsp->unsli3.sli3Words[7]);
			}
		}
	}
	if (irsp->ulpBdeCount != 0 &&
	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
		int found = 0;

		/* search continue save q for same XRI */
		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
			if (iocbq->iocb.ulpContext == saveq->iocb.ulpContext) {
				list_add_tail(&saveq->list, &iocbq->list);
				found = 1;
				break;
			}
		}
		if (!found)
			list_add_tail(&saveq->clist,
				      &pring->iocb_continue_saveq);
		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
			list_del_init(&iocbq->clist);
			saveq = iocbq;
			irsp = &(saveq->iocb);
		} else
			return 0;
	}
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
		Rctl = FC_RCTL_ELS_REQ;
		Type = FC_TYPE_ELS;
	} else {
		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
		     irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_RCTL_ELS_REQ;
			Type = FC_TYPE_ELS;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);

	return 1;
}
/**
 * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @prspiocb: Pointer to response iocb object.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given response iocb using the iotag of the
 * response iocb. This function is called with the hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
			pring->txcmplq_cnt--;
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
		}
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
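/*
 * iocbq_lookup is a flat array indexed directly by iotag, so matching a
 * response to its originating command is O(1). Illustrative sketch of just
 * the bounded lookup step (hypothetical helper):
 */
static inline struct lpfc_iocbq *
lpfc_example_iotag_to_iocbq(struct lpfc_hba *phba, uint16_t iotag)
{
	if (iotag == 0 || iotag > phba->sli.last_iotag)
		return NULL;			/* out-of-range tag */
	return phba->sli.iocbq_lookup[iotag];	/* O(1) table lookup */
}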
/**
 * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iotag: IOCB tag.
 *
 * This function looks up the iocb_lookup table to get the command iocb
 * corresponding to the given iotag. This function is called with the
 * hbalock held.
 * This function returns the command iocb object if it finds the command
 * iocb else returns NULL.
 **/
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
			     struct lpfc_sli_ring *pring, uint16_t iotag)
{
	struct lpfc_iocbq *cmd_iocb;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		if (cmd_iocb->iocb_flag & LPFC_IO_ON_Q) {
			cmd_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
			pring->txcmplq_cnt--;
		}
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0372 iotag x%x is out of range: max iotag (x%x)\n",
			iotag, phba->sli.last_iotag);
	return NULL;
}
/**
 * lpfc_sli_process_sol_iocb - process solicited iocb completion
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @saveq: Pointer to the response iocb to be processed.
 *
 * This function is called by the ring event handler for non-fcp
 * rings when there is a new response iocb in the response ring.
 * The caller is not required to hold any locks. This function
 * gets the command iocb associated with the response iocb and
 * calls the completion handler for the command iocb. If there
 * is no completion handler, the function will free the resources
 * associated with command iocb. If the response iocb is for
 * an already aborted command iocb, the status of the completion
 * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
 * This function always returns 1.
 **/
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * If an ELS command failed send an event to mgmt
			 * application.
			 */
			if (saveq->iocb.ulpStatus &&
			    (pring->ringno == LPFC_ELS_RING) &&
			    (cmdiocbp->iocb.ulpCommand ==
			     CMD_ELS_REQUEST64_CR))
				lpfc_send_els_failure_event(phba,
							    cmdiocbp, saveq);

			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if ((phba->sli_rev < LPFC_SLI_REV4) &&
				    (cmdiocbp->iocb_flag &
				     LPFC_DRIVER_ABORTED)) {
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;

					/* Firmware could still be in progress
					 * of DMAing payload, so don't free data
					 * buffer till after a hbeat.
					 */
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
				}
				if (phba->sli_rev == LPFC_SLI_REV4) {
					if (saveq->iocb_flag &
					    LPFC_EXCHANGE_BUSY) {
						/* Set cmdiocb flag for the
						 * exchange busy so sgl (xri)
						 * will not be released until
						 * the abort xri is received
						 * from hba.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag |=
							LPFC_EXCHANGE_BUSY;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
					if (cmdiocbp->iocb_flag &
					    LPFC_DRIVER_ABORTED) {
						/*
						 * Clear LPFC_DRIVER_ABORTED
						 * bit in case it was driver
						 * initiated abort.
						 */
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						cmdiocbp->iocb_flag &=
							~LPFC_DRIVER_ABORTED;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
						cmdiocbp->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						cmdiocbp->iocb.un.ulpWord[4] =
							IOERR_ABORT_REQUESTED;
						/*
						 * For SLI4, irsiocb contains
						 * NO_XRI in sli_xritag, it
						 * shall not affect releasing
						 * sgl (xri) process.
						 */
						saveq->iocb.ulpStatus =
							IOSTAT_LOCAL_REJECT;
						saveq->iocb.un.ulpWord[4] =
							IOERR_SLI_ABORTED;
						spin_lock_irqsave(
							&phba->hbalock, iflag);
						saveq->iocb_flag |=
							LPFC_DELAY_MEM_FREE;
						spin_unlock_irqrestore(
							&phba->hbalock, iflag);
					}
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
					"Data: x%x x%x x%x x%x\n",
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}

	return rc;
}
/**
 * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function is called from the iocb ring event handlers when the
 * put pointer is ahead of the get pointer for a ring. This function signals
 * an error attention condition to the worker thread and the worker
 * thread will transition the HBA to offline state.
 **/
static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	lpfc_worker_wake_up(phba);

	return;
}
/**
 * lpfc_poll_eratt - Error attention polling timer timeout handler
 * @ptr: Pointer to address of HBA context object.
 *
 * This function is invoked by the Error Attention polling timer when the
 * timer times out. It will check the SLI Error Attention register for
 * possible attention events. If so, it will post an Error Attention event
 * and wake up worker thread to process it. Otherwise, it will set up the
 * Error Attention polling timer for the next poll.
 **/
void lpfc_poll_eratt(unsigned long ptr)
{
	struct lpfc_hba *phba;
	uint32_t eratt = 0;

	phba = (struct lpfc_hba *)ptr;

	/* Check chip HA register for error event */
	eratt = lpfc_sli_check_eratt(phba);

	if (eratt)
		/* Tell the worker thread there is work to do */
		lpfc_worker_wake_up(phba);
	else
		/* Restart the timer for next eratt poll */
		mod_timer(&phba->eratt_poll, jiffies +
					HZ * LPFC_ERATT_POLL_INTERVAL);
	return;
}
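/*
 * Re-arming a timer from its own handler, as above, is the standard way
 * to build a periodic poll with this timer API. Minimal sketch of the
 * re-arm step in isolation (hypothetical helper):
 */
static inline void
lpfc_example_rearm_eratt_poll(struct lpfc_hba *phba)
{
	/* next poll fires LPFC_ERATT_POLL_INTERVAL seconds from now */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
}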
/**
 * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the interrupt context when there is a ring
 * event for the fcp ring. The caller does not hold any lock.
 * The function processes each response iocb in the response ring until it
 * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
 * the LE bit set. The function will call the completion handler of the command
 * iocb if the response iocb indicates a completion for a command iocb or it is
 * an abort completion. The function will call the lpfc_sli_process_unsol_iocb
 * function if this is an unsolicited iocb.
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 **/
int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}
	if (phba->fcp_ring_in_use) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	} else
		phba->fcp_ring_in_use = 1;

	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				phba->lpfc_rampdown_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(uint32_t *)&irsp->un1,
					*((uint32_t *)&irsp->un1 + 1));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if (unlikely(!cmdiocbq))
				break;
			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
			if (cmdiocbq->iocb_cmpl) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	phba->fcp_ring_in_use = 0;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
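/*
 * Each response IOCB above is copied out of the DMA ring into a local
 * lpfc_iocbq before processing, so the ring slot can be handed back to
 * the port immediately; lpfc_sli_pcimem_bcopy also performs the byte-swap
 * mentioned above. Sketch of that copy step in isolation (hypothetical
 * helper):
 */
static inline void
lpfc_example_copy_rsp_entry(struct lpfc_hba *phba, IOCB_t *entry,
			    struct lpfc_iocbq *local)
{
	lpfc_sli_pcimem_bcopy((uint32_t *) entry,
			      (uint32_t *) &local->iocb,
			      phba->iocb_rsp_size);	/* swapped copy */
	INIT_LIST_HEAD(&local->list);
}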
/**
 * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @rspiocbp: Pointer to driver response IOCB object.
 *
 * This function is called from the worker thread when there is a slow-path
 * response IOCB to process. This function chains all the response iocbs until
 * seeing the iocb with the LE bit set. The function will call
 * lpfc_sli_process_sol_iocb function if the response iocb indicates a
 * completion of a command iocb. The function will call the
 * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
 * The function frees the resources or calls the completion handler if this
 * iocb is an abort completion. The function returns NULL when the response
 * iocb has the LE bit set and all the chained iocbs are processed, otherwise
 * this function shall chain the iocb on to the iocb_continueq and return the
 * response iocb passed in.
 **/
static struct lpfc_iocbq *
lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct lpfc_iocbq *rspiocbp)
{
	struct lpfc_iocbq *saveq;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *next_iocb;
	IOCB_t *irsp = NULL;
	uint32_t free_saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	unsigned long iflag;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* First add the response iocb to the continueq list */
	list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
	pring->iocb_continueq_cnt++;

	/* Now, determine whether the list is completed for processing */
	irsp = &rspiocbp->iocb;
	if (irsp->ulpLe) {
		/*
		 * By default, the driver expects to free all resources
		 * associated with this iocb completion.
		 */
		free_saveq = 1;
		saveq = list_get_first(&pring->iocb_continueq,
				       struct lpfc_iocbq, list);
		irsp = &(saveq->iocb);
		list_del_init(&pring->iocb_continueq);
		pring->iocb_continueq_cnt = 0;

		pring->stats.iocb_rsp++;

		/*
		 * If resource errors reported from HBA, reduce
		 * queuedepths of the SCSI device.
		 */
		if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
		    (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			phba->lpfc_rampdown_queue_depth(phba);
			spin_lock_irqsave(&phba->hbalock, iflag);
		}

		if (irsp->ulpStatus) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0328 Rsp Ring %d error: "
					"IOCB Data: "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x "
					"x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7),
					*(((uint32_t *) irsp) + 8),
					*(((uint32_t *) irsp) + 9),
					*(((uint32_t *) irsp) + 10),
					*(((uint32_t *) irsp) + 11),
					*(((uint32_t *) irsp) + 12),
					*(((uint32_t *) irsp) + 13),
					*(((uint32_t *) irsp) + 14),
					*(((uint32_t *) irsp) + 15));
		}

		/*
		 * Fetch the IOCB command type and call the correct completion
		 * routine.  Solicited and Unsolicited IOCBs on the ELS ring
		 * get freed back to the lpfc_iocb_list by the discovery
		 * kernel thread.
		 */
		iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
		type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
		switch (type) {
		case LPFC_SOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;

		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			if (!rc)
				free_saveq = 0;
			break;

		case LPFC_ABORT_IOCB:
			cmdiocbp = NULL;
			if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
				cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
								 saveq);
			if (cmdiocbp) {
				/* Call the specified completion routine */
				if (cmdiocbp->iocb_cmpl) {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
							      saveq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				} else
					__lpfc_sli_release_iocbq(phba,
								 cmdiocbp);
			}
			break;

		case LPFC_UNKNOWN_IOCB:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *)irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev),
					 "lpfc%d: %s\n",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0335 Unknown IOCB "
						"command Data: x%x "
						"x%x x%x x%x\n",
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		if (free_saveq) {
			list_for_each_entry_safe(rspiocbp, next_iocb,
						 &saveq->list, list) {
				list_del(&rspiocbp->list);
				__lpfc_sli_release_iocbq(phba, rspiocbp);
			}
			__lpfc_sli_release_iocbq(phba, saveq);
		}
		rspiocbp = NULL;
	}
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rspiocbp;
}
/**
 * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This routine wraps the actual slow_ring event process routine from the
 * API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
}
/**
 * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a ring event
 * for non-fcp rings. The caller does not hold any lock. The function will
 * remove each response iocb in the response ring and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp;
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	uint32_t portRspPut, portRspMax;
	unsigned long iflag;
	uint32_t status;

	pgp = &phba->port_gp[pring->ringno];
	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return;
	}

	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __func__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* Handle the response IOCB */
		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
		spin_lock_irqsave(&phba->hbalock, iflag);

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
/**
 * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mask: Host attention register mask for this ring.
 *
 * This function is called from the worker thread when there is a pending
 * ELS response iocb on the driver internal slow-path response iocb worker
 * queue. The caller does not hold any lock. The function will remove each
 * response iocb from the response worker queue and calls the handle
 * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
 **/
static void
lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
				   struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_iocbq *irspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irqsave(&phba->hbalock, iflag);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
						 cq_event);
			/* Translate ELS WCQE to response IOCBQ */
			irspiocbq = lpfc_sli4_els_wcqe_to_rspiocbq(phba,
								   irspiocbq);
			if (irspiocbq)
				lpfc_sli_sp_handle_rspiocb(phba, pring,
							   irspiocbq);
			break;
		case CQE_CODE_RECEIVE:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_sli4_handle_received_buffer(phba, dmabuf);
			break;
		default:
			break;
		}
	}
}
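/*
 * The _s3/_s4 pair above is reached through a per-hba function pointer
 * installed once at setup, so shared code calls a single wrapper. Hedged
 * sketch of how such a jump-table entry is selected (the driver does this
 * in its own setup path; the helper here is illustrative only):
 */
static inline void
lpfc_example_install_slow_ring_handler(struct lpfc_hba *phba)
{
	if (phba->sli_rev == LPFC_SLI_REV4)
		phba->lpfc_sli_handle_slow_ring_event =
					lpfc_sli_handle_slow_ring_event_s4;
	else
		phba->lpfc_sli_handle_slow_ring_event =
					lpfc_sli_handle_slow_ring_event_s3;
}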
/**
 * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues an abort iocb for all the iocb commands
 * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
 * the return of this function. The caller is not required to hold any locks.
 **/
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_fabric_abort_hba(phba);
	}

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);

	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}
/**
 * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
 * @phba: Pointer to HBA context object.
 *
 * This function flushes all iocbs in the fcp ring and frees all the iocb
 * objects in txq and txcmplq. This function will not issue abort iocbs
 * for all the iocb commands in txcmplq, they will just be returned with
 * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
 * slot has been permanently disabled.
 **/
void
lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
{
	LIST_HEAD(txq);
	LIST_HEAD(txcmplq);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	/* Currently, only one fcp ring */
	pring = &psli->ring[psli->fcp_ring];

	spin_lock_irq(&phba->hbalock);
	/* Retrieve everything on txq */
	list_splice_init(&pring->txq, &txq);
	pring->txq_cnt = 0;

	/* Retrieve everything on the txcmplq */
	list_splice_init(&pring->txcmplq, &txcmplq);
	pring->txcmplq_cnt = 0;
	spin_unlock_irq(&phba->hbalock);

	/* Flush the txq */
	lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	/* Flush the txcmpq */
	lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
}
/**
 * lpfc_sli_brdready_s3 - Check for sli3 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function reads the host status register and compares
 * with the provided bit mask to check if HBA completed
 * the restart. This function will wait in a loop for the
 * HBA to complete restart. If the HBA does not restart within
 * 15 iterations, the function will reset the HBA again. The
 * function returns 1 when HBA fails to restart, otherwise it
 * returns zero.
 **/
*phba
, uint32_t mask
)
3479 /* Read the HBA Host Status Register */
3480 if (lpfc_readl(phba
->HSregaddr
, &status
))
3484 * Check status register every 100ms for 5 retries, then every
3485 * 500ms for 5, then every 2.5 sec for 5, then reset board and
3486 * every 2.5 sec for 4.
3487 * Break our of the loop if errors occurred during init.
3489 while (((status
& mask
) != mask
) &&
3490 !(status
& HS_FFERM
) &&
3502 phba
->pport
->port_state
= LPFC_VPORT_UNKNOWN
;
3503 lpfc_sli_brdrestart(phba
);
3505 /* Read the HBA Host Status Register */
3506 if (lpfc_readl(phba
->HSregaddr
, &status
)) {
3512 /* Check to see if any errors occurred during init */
3513 if ((status
& HS_FFERM
) || (i
>= 20)) {
3514 lpfc_printf_log(phba
, KERN_ERR
, LOG_INIT
,
3515 "2751 Adapter failed to restart, "
3516 "status reg x%x, FW Data: A8 x%x AC x%x\n",
3518 readl(phba
->MBslimaddr
+ 0xa8),
3519 readl(phba
->MBslimaddr
+ 0xac));
3520 phba
->link_state
= LPFC_HBA_ERROR
;
/**
 * lpfc_sli_brdready_s4 - Check for sli4 host ready status
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This function checks the host status register to check if HBA is
 * ready. This function will wait in a loop for the HBA to be ready.
 * If the HBA is not ready, the function will reset the HBA PCI
 * function again. The function returns 1 when HBA fails to be ready,
 * otherwise it returns zero.
 **/
static int
lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
{
	uint32_t status;
	int retval = 0;

	/* Read the HBA Host Status Register */
	status = lpfc_sli4_post_status_check(phba);

	if (status) {
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		status = lpfc_sli4_post_status_check(phba);
	}

	/* Check to see if any errors occurred during init */
	if (status) {
		phba->link_state = LPFC_HBA_ERROR;
		retval = 1;
	} else
		phba->sli4_hba.intr_enable = 0;

	return retval;
}
/**
 * lpfc_sli_brdready - Wrapper func for checking the hba readiness
 * @phba: Pointer to HBA context object.
 * @mask: Bit mask to be checked.
 *
 * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
 * from the API jump table function pointer from the lpfc_hba struct.
 **/
int
lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
{
	return phba->lpfc_sli_brdready(phba, mask);
}
#define BARRIER_TEST_PATTERN (0xdeadbeef)
/**
 * lpfc_reset_barrier - Make HBA ready for HBA reset
 * @phba: Pointer to HBA context object.
 *
 * This function is called before resetting an HBA. This
 * function requests HBA to quiesce DMAs before a reset.
 **/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy, ha_copy, resp_data;
	int i;
	uint8_t hdrtype;

	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	if (lpfc_readl(phba->HCregaddr, &hc_copy))
		return;
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;

	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return;
	if (ha_copy & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	for (i = 0; i < 50; i++) {
		if (lpfc_readl((resp_buf + 1), &resp_data))
			return;
		if (resp_data != ~(BARRIER_TEST_PATTERN))
			mdelay(1);
		else
			break;
	}
	if (lpfc_readl((resp_buf + 1), &resp_data))
		return;
	if (resp_data != ~(BARRIER_TEST_PATTERN)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
		    phba->pport->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	resp_data = 0;
	for (i = 0; i < 500; i++) {
		if (lpfc_readl(resp_buf, &resp_data))
			return;
		if (resp_data != mbox)
			mdelay(1);
		else
			break;
	}

clear_errat:

	while (++i < 500) {
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return;
		if (!(ha_copy & HA_ERATT))
			mdelay(1);
		else
			break;
	}

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}

restore_hc:
	phba->link_flag &= ~LS_IGNORE_ERATT;
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
/**
 * lpfc_sli_brdkill - Issue a kill_board mailbox command
 * @phba: Pointer to HBA context object.
 *
 * This function issues a kill_board mailbox command and waits for
 * the error attention interrupt. This function is called for stopping
 * the firmware processing. The caller is not required to hold any
 * locks. This function calls lpfc_hba_down_post function to free
 * any pending commands after the kill. The function will return 1 when it
 * fails to kill the board else will return 0.
 **/
int
lpfc_sli_brdkill(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0329 Kill HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return 1;

	/* Disable the error attention */
	spin_lock_irq(&phba->hbalock);
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		mempool_free(pmb, phba->mbox_mem_pool);
		return 1;
	}
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	phba->link_flag |= LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2752 KILL_BOARD command failed retval %d\n",
				retval);
		spin_lock_irq(&phba->hbalock);
		phba->link_flag &= ~LS_IGNORE_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return 1;
	}

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
	 * attention every 100ms for 3 seconds. If we don't get ERATT after
	 * 3 seconds we still set HBA_ERROR state because the status of the
	 * board is now undefined.
	 */
	if (lpfc_readl(phba->HAregaddr, &ha_copy))
		return 1;
	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		if (lpfc_readl(phba->HAregaddr, &ha_copy))
			return 1;
	}

	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->pport->stopped = 1;
	}
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;
	phba->link_flag &= ~LS_IGNORE_ERATT;
	spin_unlock_irq(&phba->hbalock);

	lpfc_hba_down_post(phba);
	phba->link_state = LPFC_HBA_ERROR;

	return ha_copy & HA_ERATT ? 0 : 1;
}
/**
 * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets the HBA by writing HC_INITFF to the control
 * register. After the HBA resets, this function resets all the iocb ring
 * indices. This function disables PCI layer parity checking during
 * the reset.
 * This function returns 0 always.
 * The caller is not required to hold any locks.
 **/
int
lpfc_sli_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	uint16_t cfg_value;
	int i;

	psli = &phba->sli;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0325 Reset HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);

	/* Now toggle INITFF bit in the Host Control Register */
	writel(HC_INITFF, phba->HCregaddr);
	mdelay(1);
	readl(phba->HCregaddr); /* flush */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	/* Initialize relevant SLI info */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag = 0;
		pring->rspidx = 0;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		pring->missbufcnt = 0;
	}

	phba->link_state = LPFC_WARM_START;
	return 0;
}
/**
 * lpfc_sli4_brdreset - Reset a sli-4 HBA
 * @phba: Pointer to HBA context object.
 *
 * This function resets a SLI4 HBA. This function disables PCI layer parity
 * checking while it resets the device. The caller is not required to hold
 * any locks.
 *
 * This function returns 0 always.
 **/
int
lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint16_t cfg_value;
	uint8_t qindx;

	/* Reset HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0295 Reset HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->link_events = 0;
	phba->pport->fc_myDID = 0;
	phba->pport->fc_prevDID = 0;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~(LPFC_PROCESS_LA);
	phba->fcf.fcf_flag = 0;
	/* Clean up the child queue list for the CQs */
	list_del_init(&phba->sli4_hba.mbx_wq->list);
	list_del_init(&phba->sli4_hba.els_wq->list);
	list_del_init(&phba->sli4_hba.hdr_rq->list);
	list_del_init(&phba->sli4_hba.dat_rq->list);
	list_del_init(&phba->sli4_hba.mbx_cq->list);
	list_del_init(&phba->sli4_hba.els_cq->list);
	for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
		list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
	for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
		list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
	spin_unlock_irq(&phba->hbalock);

	/* Now physically reset the device */
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0389 Performing PCI function reset!\n");

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	/* Perform FCoE PCI function reset */
	lpfc_pci_function_reset(phba);

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);

	return 0;
}
/**
 * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to
 * restart the HBA. The caller is not required to hold any lock.
 * This function writes MBX_RESTART mailbox command to the SLIM and
 * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
 * function to free any pending commands. The function enables
 * POST only during the first initialization. The function returns zero.
 * The function does not guarantee completion of MBX_RESTART mailbox
 * command before the return of this function.
 **/
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	volatile uint32_t word0;
	void __iomem *to_slim;
	uint32_t hba_aer_enabled;

	spin_lock_irq(&phba->hbalock);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0337 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (phba->pport->port_state)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Give the INITFF and Post time to settle. */
	mdelay(100);

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}
/**
 * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI initialization code path to restart
 * a SLI4 HBA. The caller is not required to hold any lock.
 * At the end of the function, it calls lpfc_hba_down_post function to
 * free any pending commands.
 **/
static int
lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint32_t hba_aer_enabled;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"0296 Restart HBA Data: x%x x%x\n",
			phba->pport->port_state, psli->sli_flag);

	/* Take PCIe device Advanced Error Reporting (AER) state */
	hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED;

	lpfc_sli4_brdreset(phba);

	spin_lock_irq(&phba->hbalock);
	phba->pport->stopped = 0;
	phba->link_state = LPFC_INIT_START;
	phba->hba_flag = 0;
	spin_unlock_irq(&phba->hbalock);

	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
	psli->stats_start = get_seconds();

	/* Reset HBA AER if it was enabled, note hba_flag was reset above */
	if (hba_aer_enabled)
		pci_disable_pcie_error_reporting(phba->pcidev);

	lpfc_hba_down_post(phba);

	return 0;
}
/**
 * lpfc_sli_brdrestart - Wrapper func for restarting hba
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba restart routine. It is
 * invoked through the API jump table function pointer in the lpfc_hba
 * struct.
 **/
int
lpfc_sli_brdrestart(struct lpfc_hba *phba)
{
	return phba->lpfc_sli_brdrestart(phba);
}
/**
 * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
 * @phba: Pointer to HBA context object.
 *
 * This function is called after a HBA restart to wait for successful
 * restart of the HBA. Successful restart of the HBA is indicated by
 * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
 * iterations, the function will restart the HBA again. The function returns
 * zero if the HBA successfully restarts, else it returns a negative error
 * code.
 **/
int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	if (lpfc_readl(phba->HSregaddr, &status))
		return -EIO;

	/* Check status register to see what current state is */
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 10 retries, then every 100ms for 90
		 * retries, then every 1 sec for 50 retries for a total of
		 * ~60 seconds before resetting the board again and checking
		 * every 1 sec for 50 retries. Up to 60 seconds of board-ready
		 * delay is required for Falcon FIPS zeroization to complete;
		 * any board reset in between restarts zeroization and further
		 * delays board ready.
		 */
		if (i++ >= 200) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0436 Adapter failed to init, "
					"timeout, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0437 Adapter failed to init, "
					"chipset, status reg x%x, "
					"FW Data: A8 x%x AC x%x\n", status,
					readl(phba->MBslimaddr + 0xa8),
					readl(phba->MBslimaddr + 0xac));
			phba->link_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 10)
			msleep(10);
		else if (i <= 100)
			msleep(100);
		else
			msleep(1000);

		if (i == 150) {
			/* Do post */
			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		if (lpfc_readl(phba->HSregaddr, &status))
			return -EIO;
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0438 Adapter failed to init, chipset, "
				"status reg x%x, "
				"FW Data: A8 x%x AC x%x\n", status,
				readl(phba->MBslimaddr + 0xa8),
				readl(phba->MBslimaddr + 0xac));
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
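/*
 * Worked timing note (illustrative): the schedule above is 10 polls at
 * 10ms, then 90 at 100ms, then 50 at 1s, roughly 59 seconds before the
 * board is restarted at iteration 150, followed by up to 50 more 1-sec
 * polls before the loop gives up at iteration 200. The ~60 second budget
 * is there so an in-progress Falcon FIPS zeroization is not interrupted
 * by a premature reset.
 */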
/**
 * lpfc_sli_hbq_count - Get the number of HBQs to be configured
 *
 * This function calculates and returns the number of HBQs required to be
 * configured.
 **/
int
lpfc_sli_hbq_count(void)
{
	return ARRAY_SIZE(lpfc_hbq_defs);
}
/**
 * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
 *
 * This function adds the number of hbq entries in every HBQ to get
 * the total number of hbq entries required for the HBA and returns
 * the total count.
 **/
static int
lpfc_sli_hbq_entry_count(void)
{
	int  hbq_count = lpfc_sli_hbq_count();
	int  count = 0;
	int  i;

	for (i = 0; i < hbq_count; ++i)
		count += lpfc_hbq_defs[i]->entry_count;
	return count;
}
/**
 * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
 *
 * This function calculates amount of memory required for all hbq entries
 * to be configured and returns the total memory required.
 **/
int
lpfc_sli_hbq_size(void)
{
	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
}
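/*
 * Sizing sketch (hypothetical numbers, for illustration only): if
 * lpfc_hbq_defs[] described two HBQs with entry counts 256 and 128,
 * lpfc_sli_hbq_entry_count() would return 384 and lpfc_sli_hbq_size()
 * would report 384 * sizeof(struct lpfc_hbq_entry) bytes.
 */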
/**
 * lpfc_sli_hbq_setup - configure and initialize HBQs
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 **/
static int
lpfc_sli_hbq_setup(struct lpfc_hba *phba)
{
	int  hbq_count = lpfc_sli_hbq_count();
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	uint32_t hbqno;
	uint32_t hbq_entry_index;

	/* Get a Mailbox buffer to setup mailbox
	 * commands for HBA initialization
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!pmb)
		return -ENOMEM;

	pmbox = &pmb->u.mb;

	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
	phba->link_state = LPFC_INIT_MBX_CMDS;
	phba->hbq_in_use = 1;

	hbq_entry_index = 0;
	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
		phba->hbqs[hbqno].next_hbqPutIdx = 0;
		phba->hbqs[hbqno].hbqPutIdx      = 0;
		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
		phba->hbqs[hbqno].entry_count =
			lpfc_hbq_defs[hbqno]->entry_count;
		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
			hbq_entry_index, pmb);
		hbq_entry_index += phba->hbqs[hbqno].entry_count;

		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */

			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1805 Adapter failed to init. "
					"Data: x%x x%x x%x\n",
					pmbox->mbxCommand,
					pmbox->mbxStatus, hbqno);

			phba->link_state = LPFC_HBA_ERROR;
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ENXIO;
		}
	}
	phba->hbq_count = hbq_count;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* Initially populate or replenish the HBQs */
	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
	return 0;
}
/**
 * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
 * @phba: Pointer to HBA context object.
 *
 * This function is called during the SLI initialization to configure
 * all the HBQs and post buffers to the HBQ. The caller is not
 * required to hold any locks. This function will return zero if successful
 * else it will return negative error code.
 **/
static int
lpfc_sli4_rb_setup(struct lpfc_hba *phba)
{
	phba->hbq_in_use = 1;
	phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
	phba->hbq_count = 1;
	/* Initially populate or replenish the HBQs */
	lpfc_sli_hbqbuf_init_hbqs(phba, 0);
	return 0;
}
/**
 * lpfc_sli_config_port - Issue config port mailbox command
 * @phba: Pointer to HBA context object.
 * @sli_mode: sli mode - 2/3
 *
 * This function is called by the sli initialization code path
 * to issue config_port mailbox command. This function restarts the
 * HBA firmware and issues a config_port mailbox command to configure
 * the SLI interface in the sli mode specified by sli_mode
 * variable. The caller is not required to hold any locks.
 * The function returns 0 if successful, else returns negative error
 * code.
 **/
int
lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	phba->sli_rev = sli_mode;
	while (resetcount < 2 && !done) {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A
		 * value of 0 means the call was successful. Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->link_state = LPFC_LINK_UNKNOWN;
			continue;
		} else if (rc)
			break;
		phba->link_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
					LPFC_SLI3_HBQ_ENABLED |
					LPFC_SLI3_CRP_ENABLED |
					LPFC_SLI3_BG_ENABLED |
					LPFC_SLI3_DSS_ENABLED);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
			spin_unlock_irq(&phba->hbalock);
			rc = -ENXIO;
		} else {
			/* Allow asynchronous mailbox command to go through */
			spin_lock_irq(&phba->hbalock);
			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
			spin_unlock_irq(&phba->hbalock);
			done = 1;
		}
	}
	if (!done) {
		rc = -EINVAL;
		goto do_prep_failed;
	}
	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
		if (!pmb->u.mb.un.varCfgPort.cMA) {
			rc = -ENXIO;
			goto do_prep_failed;
		}
		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
				phba->max_vpi : phba->max_vports;
		} else
			phba->max_vpi = 0;
		phba->fips_level = 0;
		phba->fips_spec_rev = 0;
		if (pmb->u.mb.un.varCfgPort.gdss) {
			phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
			phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
			phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2850 Security Crypto Active. FIPS x%d "
					"(Spec Rev: x%d)",
					phba->fips_level, phba->fips_spec_rev);
		}
		if (pmb->u.mb.un.varCfgPort.sec_err) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2856 Config Port Security Crypto "
					"Error: x%x ",
					pmb->u.mb.un.varCfgPort.sec_err);
		}
		if (pmb->u.mb.un.varCfgPort.gerbm)
			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
		if (pmb->u.mb.un.varCfgPort.gcrp)
			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;

		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
		phba->port_gp = phba->mbox->us.s3_pgp.port;

		if (phba->cfg_enable_bg) {
			if (pmb->u.mb.un.varCfgPort.gbg)
				phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
			else
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"0443 Adapter did not grant "
						"BlockGuard\n");
		}
	} else {
		phba->hbq_get = NULL;
		phba->port_gp = phba->mbox->us.s2.port;
		phba->max_vpi = 0;
	}
do_prep_failed:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_sli_hba_setup - SLI initialization function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI initialization function. This function
 * is called by the HBA initialization code, HBA reset code and HBA
 * error attention handler code. Caller is not required to hold any
 * locks. This function issues config_port mailbox command to configure
 * the SLI, setup iocb rings and HBQ rings. In the end the function
 * calls the config_port_post function to issue init_link mailbox
 * command and to start the discovery. The function will return zero
 * if successful, else it will return negative error code.
 **/
int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
	uint32_t rc;
	int  mode = 3;

	switch (lpfc_sli_mode) {
	case 2:
		if (phba->cfg_enable_npiv) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1824 NPIV enabled: Override lpfc_sli_mode "
				"parameter (%d) to auto (0).\n",
				lpfc_sli_mode);
			break;
		}
		mode = 2;
		break;
	case 0:
	case 3:
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1819 Unrecognized lpfc_sli_mode "
				"parameter: %d.\n", lpfc_sli_mode);
		break;
	}

	rc = lpfc_sli_config_port(phba, mode);

	if (rc && lpfc_sli_mode == 3)
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
				"1820 Unable to select SLI-3. "
				"Not supported by adapter.\n");
	if (rc && mode != 2)
		rc = lpfc_sli_config_port(phba, 2);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2709 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2708 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
	}

	if (phba->sli_rev == 3) {
		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
	} else {
		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
		phba->sli3_options = 0;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
			phba->sli_rev, phba->max_vpi);
	rc = lpfc_sli_ring_map(phba);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Init HBQs */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		rc = lpfc_sli_hbq_setup(phba);
		if (rc)
			goto lpfc_sli_hba_setup_error;
	}
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_PROCESS_LA;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	return rc;

lpfc_sli_hba_setup_error:
	phba->link_state = LPFC_HBA_ERROR;
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0445 Firmware initialization failed\n");
	return rc;
}
/**
 * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
 * @phba: Pointer to HBA context object.
 * @mboxq: mailbox pointer.
 * This function issues a dump mailbox command to read config region
 * 23 and parses the records in the region, populating the driver's
 * FCoE parameters from them.
 **/
static int
lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
		LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_dmabuf *mp;
	struct lpfc_mqe *mqe;
	uint32_t data_length;
	int rc;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	mqe = &mboxq->u.mqe;
	if (lpfc_dump_fcoe_param(phba, mboxq))
		return -ENOMEM;

	mp = (struct lpfc_dmabuf *) mboxq->context1;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):2571 Mailbox cmd x%x Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
			"CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_command, mqe),
			bf_get(lpfc_mqe_status, mqe),
			mqe->un.mb_words[0], mqe->un.mb_words[1],
			mqe->un.mb_words[2], mqe->un.mb_words[3],
			mqe->un.mb_words[4], mqe->un.mb_words[5],
			mqe->un.mb_words[6], mqe->un.mb_words[7],
			mqe->un.mb_words[8], mqe->un.mb_words[9],
			mqe->un.mb_words[10], mqe->un.mb_words[11],
			mqe->un.mb_words[12], mqe->un.mb_words[13],
			mqe->un.mb_words[14], mqe->un.mb_words[15],
			mqe->un.mb_words[16], mqe->un.mb_words[50],
			mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);

	if (rc) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}
	data_length = mqe->un.mb_words[5];
	if (data_length > DMP_RGN23_SIZE) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	return 0;
}
/**
 * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the LPFC_MBOXQ_t structure.
 * @vpd: pointer to the memory to hold resulting port vpd data.
 * @vpd_size: On input, the number of bytes allocated to @vpd.
 *	      On output, the number of data bytes in @vpd.
 *
 * This routine executes a READ_REV SLI4 mailbox command. In
 * addition, this routine gets the port vpd data.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		    uint8_t *vpd, uint32_t *vpd_size)
{
	int rc = 0;
	uint32_t dma_size;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_mqe *mqe;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * Get a DMA buffer for the vpd data resulting from the READ_REV
	 * mailbox command.
	 */
	dma_size = *vpd_size;
	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
					  dma_size,
					  &dmabuf->phys,
					  GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}
	memset(dmabuf->virt, 0, dma_size);

	/*
	 * The SLI4 implementation of READ_REV conflicts at word1,
	 * bits 31:16 and SLI4 adds vpd functionality not present
	 * in SLI3. This code corrects the conflicts.
	 */
	lpfc_read_rev(phba, mboxq);
	mqe = &mboxq->u.mqe;
	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
	mqe->un.read_rev.word1 &= 0x0000FFFF;
	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc) {
		dma_free_coherent(&phba->pcidev->dev, dma_size,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
		return -EIO;
	}

	/*
	 * The available vpd length cannot be bigger than the
	 * DMA buffer passed to the port. Catch the less than
	 * case and update the caller's size.
	 */
	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
		*vpd_size = mqe->un.read_rev.avail_vpd_len;

	memcpy(vpd, dmabuf->virt, *vpd_size);

	dma_free_coherent(&phba->pcidev->dev, dma_size,
			  dmabuf->virt, dmabuf->phys);
	kfree(dmabuf);
	return 0;
}
/**
 * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to explicitly arm the SLI4 device's completion and
 * event queues.
 **/
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
	int fcp_eqidx;

	lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
	lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
		lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
				     LPFC_QUEUE_REARM);
	lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
		lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
				     LPFC_QUEUE_REARM);
}
/**
 * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
 * @phba: Pointer to HBA context object.
 *
 * This function is the main SLI4 device initialization PCI function. This
 * function is called by the HBA initialization code, HBA reset code and
 * HBA error attention handler code. Caller is not required to hold any
 * locks.
 **/
int
lpfc_sli4_hba_setup(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mqe *mqe;
	uint8_t *vpd;
	uint32_t vpd_size;
	uint32_t ftr_rsp = 0;
	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_dmabuf *mp;

	/*
	 * TODO:  Why does this routine execute these task in a different
	 * order from probe?
	 */
	/* Perform a PCI function reset to start from clean */
	rc = lpfc_pci_function_reset(phba);
	if (unlikely(rc))
		return -ENODEV;

	/* Check the HBA Host Status Register for readyness */
	rc = lpfc_sli4_post_status_check(phba);
	if (unlikely(rc))
		return -ENODEV;
	else {
		spin_lock_irq(&phba->hbalock);
		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);
	}

	/*
	 * Allocate a single mailbox container for initializing the
	 * port.
	 */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/*
	 * Continue initialization with default values even if driver failed
	 * to read FCoE param config regions
	 */
	if (lpfc_sli4_read_fcoe_params(phba, mboxq))
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
			"2570 Failed to read FCoE parameters\n");

	/* Issue READ_REV to collect vpd and FW information. */
	vpd_size = SLI4_PAGE_SIZE;
	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd) {
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	if (unlikely(rc)) {
		kfree(vpd);
		goto out_free_mbox;
	}
	mqe = &mboxq->u.mqe;
	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
		phba->hba_flag |= HBA_FCOE_MODE;
	else
		phba->hba_flag &= ~HBA_FCOE_MODE;

	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
		LPFC_DCBX_CEE_MODE)
		phba->hba_flag |= HBA_FIP_SUPPORT;
	else
		phba->hba_flag &= ~HBA_FIP_SUPPORT;

	if (phba->sli_rev != LPFC_SLI_REV4 ||
	    !(phba->hba_flag & HBA_FCOE_MODE)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0376 READ_REV Error. SLI Level %d "
			"FCoE enabled %d\n",
			phba->sli_rev, phba->hba_flag & HBA_FCOE_MODE);
		rc = -EIO;
		kfree(vpd);
		goto out_free_mbox;
	}
	/*
	 * Evaluate the read rev and vpd data. Populate the driver
	 * state with the results. If this routine fails, the failure
	 * is not fatal as the driver will use generic values.
	 */
	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
	if (unlikely(!rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0377 Error %d parsing vpd. "
				"Using defaults.\n", rc);
		rc = 0;
	}
	kfree(vpd);

	/* Save information as VPD data */
	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
					 &mqe->un.read_rev);
	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
				       &mqe->un.read_rev);
	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
					    &mqe->un.read_rev);
	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
					   &mqe->un.read_rev);
	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0380 READ_REV Status x%x "
			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			bf_get(lpfc_mqe_status, mqe),
			phba->vpd.rev.opFwName,
			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);

	/*
	 * Discover the port's supported feature set and match it against the
	 * hosts requests.
	 */
	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (unlikely(rc)) {
		rc = -EIO;
		goto out_free_mbox;
	}

	/*
	 * The port must support FCP initiator mode as this is the
	 * only mode running in the host.
	 */
	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0378 No support for fcpi mode.\n");
		ftr_rsp++;
	}
	if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
		phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
	/*
	 * If the port cannot support the host's requested features
	 * then turn off the global config parameters to disable the
	 * feature in the driver.  This is not a fatal error.
	 */
	if ((phba->cfg_enable_bg) &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (phba->max_vpi && phba->cfg_enable_npiv &&
	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
		ftr_rsp++;

	if (ftr_rsp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0379 Feature Mismatch Data: x%08x %08x "
				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
				phba->cfg_enable_npiv, phba->max_vpi);
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
			phba->cfg_enable_bg = 0;
		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
			phba->cfg_enable_npiv = 0;
	}

	/* These SLI3 features are assumed in SLI4 */
	spin_lock_irq(&phba->hbalock);
	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
	spin_unlock_irq(&phba->hbalock);

	/* Read the port's service parameters. */
	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
	if (rc) {
		phba->link_state = LPFC_HBA_ERROR;
		rc = -ENOMEM;
		goto out_free_mbox;
	}

	mboxq->vport = vport;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	mp = (struct lpfc_dmabuf *) mboxq->context1;
	if (rc == MBX_SUCCESS) {
		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
		rc = 0;
	}

	/*
	 * This memory was allocated by the lpfc_read_sparam routine. Release
	 * it to the mbuf pool.
	 */
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mboxq->context1 = NULL;
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0382 READ_SPARAM command failed "
				"status %d, mbxStatus x%x\n",
				rc, bf_get(lpfc_mqe_status, mqe));
		phba->link_state = LPFC_HBA_ERROR;
		rc = -EIO;
		goto out_free_mbox;
	}

	if (phba->cfg_soft_wwnn)
		u64_to_wwn(phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (phba->cfg_soft_wwpn)
		u64_to_wwn(phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);
	memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
	       sizeof(struct lpfc_name));
	memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
	       sizeof(struct lpfc_name));

	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);

	/* Register SGL pool to the device using non-embedded mailbox command */
	rc = lpfc_sli4_post_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0582 Error %d during sgl post operation\n",
				rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Register SCSI SGL pool to the device */
	rc = lpfc_sli4_repost_scsi_sgl_list(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"0383 Error %d during scsi sgl post "
				"operation\n", rc);
		/* Some Scsi buffers were moved to the abort scsi list */
		/* A pci function reset will repost them */
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Post the rpi header region to the device. */
	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0393 Error %d during rpi post operation\n",
				rc);
		rc = -ENODEV;
		goto out_free_mbox;
	}

	/* Set up all the queues to the device */
	rc = lpfc_sli4_queue_setup(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0381 Error %d during queue setup.\n ", rc);
		goto out_stop_timers;
	}

	/* Arm the CQs and then EQs on device */
	lpfc_sli4_arm_cqeq_intr(phba);

	/* Indicate device interrupt mode */
	phba->sli4_hba.intr_enable = 1;

	/* Allow asynchronous mailbox command to go through */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Post receive buffers to the device */
	lpfc_sli4_rb_setup(phba);

	/* Reset HBA FCF states after HBA reset */
	phba->fcf.fcf_flag = 0;
	phba->fcf.current_rec.flag = 0;

	/* Start the ELS watchdog timer */
	mod_timer(&vport->els_tmofunc,
		  jiffies + HZ * (phba->fc_ratov * 2));

	/* Start heart beat timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;

	/* Start error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);

	/* Enable PCIe device Advanced Error Reporting (AER) if configured */
	if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
		rc = pci_enable_pcie_error_reporting(phba->pcidev);
		if (!rc) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2829 This device supports "
					"Advanced Error Reporting (AER)\n");
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag |= HBA_AER_ENABLED;
			spin_unlock_irq(&phba->hbalock);
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2830 This device does not support "
					"Advanced Error Reporting (AER)\n");
			phba->cfg_aer_support = 0;
		}
	}

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		/*
		 * The FC Port needs to register FCFI (index 0)
		 */
		lpfc_reg_fcfi(phba, mboxq);
		mboxq->vport = phba->pport;
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc == MBX_SUCCESS)
			rc = 0;
		else
			goto out_unset_queue;
	}
	/*
	 * The port is ready, set the host's link state to LINK_DOWN
	 * in preparation for link interrupts.
	 */
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_DOWN;
	spin_unlock_irq(&phba->hbalock);
	if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK)
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
out_unset_queue:
	/* Unset all the queues set up in this routine when error out */
	if (rc)
		lpfc_sli4_queue_unset(phba);
out_stop_timers:
	if (rc)
		lpfc_stop_hba_timers(phba);
out_free_mbox:
	mempool_free(mboxq, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_mbox_timeout - Timeout call back function for mbox timer
 * @ptr: context object - pointer to hba structure.
 *
 * This is the callback function for mailbox timer. The mailbox
 * timer is armed when a new mailbox command is issued and the timer
 * is deleted when the mailbox completes. The function is called by
 * the kernel timer code when a mailbox does not complete within
 * expected time. This function wakes up the worker thread to
 * process the mailbox timeout and returns. All the processing is
 * done by the worker thread function lpfc_mbox_timeout_handler.
 **/
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
	unsigned long iflag;
	uint32_t tmo_posted;

	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
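/*
 * Lifetime note (illustrative): this timer is armed by the mailbox issue
 * path, e.g. in lpfc_sli_issue_mbox_s3() below:
 *
 *	mod_timer(&psli->mbox_tmo, (jiffies +
 *		(HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
 *
 * and deleted with del_timer_sync() once the command completes, so this
 * callback only fires for a mailbox that is genuinely stuck.
 */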
/**
 * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
 * @phba: Pointer to HBA context object.
 *
 * This function is called from worker thread when a mailbox command times out.
 * The caller is not required to hold any locks. This function will reset the
 * HBA and recover all the pending commands.
 **/
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
	MAILBOX_t *mb = &pmbox->u.mb;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	/* Check the pmbox pointer first.  There is a race condition
	 * between the mbox timeout handler getting executed in the
	 * worklist and the mailbox actually completing. When this
	 * race condition occurs, the mbox_active will be NULL.
	 */
	spin_lock_irq(&phba->hbalock);
	if (pmbox == NULL) {
		lpfc_printf_log(phba, KERN_WARNING,
				LOG_MBOX | LOG_SLI,
				"0353 Active Mailbox cleared - mailbox timeout "
				"exiting\n");
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			mb->mbxCommand,
			phba->pport->port_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);
	spin_unlock_irq(&phba->hbalock);

	/* Setting state unknown so lpfc_sli_abort_iocb_ring
	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
	 * it to fail all outstanding SCSI IO.
	 */
	spin_lock_irq(&phba->pport->work_port_lock);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irq(&phba->pport->work_port_lock);
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_LINK_UNKNOWN;
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	pring = &psli->ring[psli->fcp_ring];
	lpfc_sli_abort_iocb_ring(phba, pring);

	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
			"0345 Resetting board due to mailbox timeout\n");

	/* Reset the HBA device */
	lpfc_reset_hba(phba);
}
/**
 * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code
 * to submit a mailbox command to firmware with SLI-3 interface spec. This
 * function gets the hbalock to protect the data structures.
 * The mailbox command can be submitted in polling mode, in which case
 * this function will wait in a polling loop for the completion of the
 * mailbox.
 * If the mailbox is submitted in no_wait mode (not polling) the
 * function will submit the command and return immediately without waiting
 * for the mailbox completion. The no_wait is supported only when HBA
 * is in SLI2/SLI3 mode - interrupts are enabled.
 * The SLI interface allows only one mailbox pending at a time. If the
 * mailbox is issued in polling mode and there is already a mailbox
 * pending, then the function will return an error. If the mailbox is issued
 * in NO_WAIT mode and there is a mailbox pending already, the function
 * will return MBX_BUSY after queuing the mailbox into mailbox queue.
 * The sli layer owns the mailbox object until the completion of mailbox
 * command if this function returns MBX_BUSY or MBX_SUCCESS. For all other
 * return codes the caller owns the mailbox command after the return of
 * the function.
 **/
static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
		       uint32_t flag)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, evtctr;
	uint32_t ha_copy, hc_copy;
	int i;
	unsigned long timeout;
	unsigned long drvr_flag = 0;
	uint32_t word0, ldata;
	void __iomem *to_slim;
	int processing_queue = 0;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	if (!pmbox) {
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		/* processing mbox queue from intr_handler */
		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
		processing_queue = 1;
		pmbox = lpfc_mbox_get(phba);
		if (!pmbox) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			return MBX_SUCCESS;
		}
	}

	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!pmbox->vport) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR,
					LOG_MBOX | LOG_VPORT,
					"1806 Mbox x%x failed. No vport\n",
					pmbox->u.mb.mbxCommand);
			dump_stack();
			goto out_not_finished;
		}
	}

	/* If the PCI channel is in offline state, do not post mbox. */
	if (unlikely(pci_channel_offline(phba->pcidev))) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	/* If HBA has a deferred error attention, fail the command. */
	if (unlikely(phba->hba_flag & DEFER_ERATT)) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
		goto out_not_finished;
	}

	mb = &pmbox->u.mb;
	status = MBX_SUCCESS;

	if (phba->link_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):0311 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
		goto out_not_finished;
	}

	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
			!(hc_copy & HC_MBINT_ENA)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2528 Mailbox command x%x cannot "
				"issue Data: x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0,
				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
			goto out_not_finished;
		}
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2529 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2530 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0308 Mbox cmd issue - BUSY Data: "
				"x%x x%x x%x x%x\n",
				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
				mb->mbxCommand, phba->pport->port_state,
				psli->sli_flag, flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		}

		return MBX_BUSY;
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
		    (mb->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2531 Mailbox command x%x "
					"cannot issue Data: x%x x%x\n",
					pmbox->vport ? pmbox->vport->vpi : 0,
					pmbox->u.mb.mbxCommand,
					psli->sli_flag, flag);
			goto out_not_finished;
		}
		/* timeout active mbox command */
		mod_timer(&psli->mbox_tmo, (jiffies +
			      (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
			"x%x\n",
			pmbox->vport ? pmbox->vport->vpi : 0,
			mb->mbxCommand, phba->pport->port_state,
			psli->sli_flag, flag);

	if (mb->mbxCommand != MBX_HEARTBEAT) {
		if (pmbox->vport) {
			lpfc_debugfs_disc_trc(pmbox->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send:       cmd:x%x mb:x%x x%x",
				(uint32_t)mb->mbxCommand,
				mb->un.varWords[0], mb->un.varWords[1]);
		}
	}

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mb->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
			*(((uint32_t *)mb) + pmbox->mbox_offset_word)
				= (uint8_t *)phba->mbox_ext
				  - (uint8_t *)phba->mbox;
		}

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_sli_pcimem_bcopy(pmbox->context2,
				(uint8_t *)phba->mbox_ext,
				pmbox->in_ext_byte_len);
		}
		/* Copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
	} else {
		/* Populate mbox extension offset word. */
		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
			*(((uint32_t *)mb) + pmbox->mbox_offset_word)
				= MAILBOX_HBA_EXT_OFFSET;

		/* Copy the mailbox extension data */
		if (pmbox->in_ext_byte_len && pmbox->context2) {
			lpfc_memcpy_to_slim(phba->MBslimaddr +
				MAILBOX_HBA_EXT_OFFSET,
				pmbox->context2, pmbox->in_ext_byte_len);
		}
		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
		}

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
			    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((uint32_t *)mb);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI_ACTIVE;
		}
	}

	wmb();

	switch (flag) {
	case MBX_NOWAIT:
		/* Set up reference to mailbox command */
		psli->mbox_active = pmbox;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
		/* Don't wait for it to finish, just return */
		break;

	case MBX_POLL:
		/* Set up null reference to mailbox command */
		psli->mbox_active = NULL;
		/* Interrupt board to do it */
		writel(CA_MBATT, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* First read mbox status word */
			word0 = *((uint32_t *)phba->mbox);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			if (lpfc_readl(phba->MBslimaddr, &word0)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		/* Read the HBA Host Attention Register */
		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
			spin_unlock_irqrestore(&phba->hbalock,
					       drvr_flag);
			goto out_not_finished;
		}
		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
					   mb->mbxCommand) * 1000) + jiffies;
		i = 0;
		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->link_state > LPFC_WARM_START))) {
			if (time_after(jiffies, timeout)) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			if (i++ > 10) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				msleep(1);
				spin_lock_irqsave(&phba->hbalock, drvr_flag);
			}

			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
				/* First copy command data */
				word0 = *((uint32_t *)phba->mbox);
				word0 = le32_to_cpu(word0);
				if (mb->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) & slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
						    ~LPFC_SLI_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
				spin_unlock_irqrestore(&phba->hbalock,
						       drvr_flag);
				goto out_not_finished;
			}
		}

		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
						      pmbox->context2,
						      pmbox->out_ext_byte_len);
			}
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
							MAILBOX_CMD_SIZE);
			/* Copy the mailbox extension data */
			if (pmbox->out_ext_byte_len && pmbox->context2) {
				lpfc_memcpy_from_slim(pmbox->context2,
					phba->MBslimaddr +
					MAILBOX_HBA_EXT_OFFSET,
					pmbox->out_ext_byte_len);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mb->mbxStatus;
	}

	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return status;

out_not_finished:
	if (processing_queue) {
		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
		lpfc_mbox_cmpl_put(phba, pmbox);
	}
	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function blocks the posting of SLI4 asynchronous mailbox commands from
 * the driver internal pending mailbox queue. It will then try to wait out the
 * possible outstanding mailbox command before return.
 *
 * Returns:
 *	0 - the outstanding mailbox command completed; otherwise, the wait for
 *	the outstanding mailbox command timed out.
 **/
static int
lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	uint8_t actcmd = MBX_HEARTBEAT;
	int rc = 0;
	unsigned long timeout;

	/* Mark the asynchronous mailbox command posting as blocked */
	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	if (phba->sli.mbox_active)
		actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
	spin_unlock_irq(&phba->hbalock);
	/* Determine how long we might wait for the active mailbox
	 * command to be gracefully completed by firmware.
	 */
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
				   jiffies;
	/* Wait for the outstanding mailbox command to complete */
	while (phba->sli.mbox_active) {
		/* Check active mailbox complete status every 2ms */
		msleep(2);
		if (time_after(jiffies, timeout)) {
			/* Timeout, mark the outstanding cmd not complete */
			rc = 1;
			break;
		}
	}

	/* Can not cleanly block async mailbox command, fail it */
	if (rc) {
		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
		spin_unlock_irq(&phba->hbalock);
	}
	return rc;
}
/**
 * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
 * @phba: Pointer to HBA context object.
 *
 * The function unblocks and resumes posting of SLI4 asynchronous mailbox
 * commands from the driver internal pending mailbox queue. It makes sure
 * that there is no outstanding mailbox command before resuming posting
 * asynchronous mailbox commands. If, for any reason, there is outstanding
 * mailbox command, it will try to wait it out before resuming asynchronous
 * mailbox command posting.
 **/
static void
lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		/* Asynchronous mailbox posting is not blocked, do nothing */
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* Outstanding synchronous mailbox command is guaranteed to be done,
	 * successful or timeout, after timing-out the outstanding mailbox
	 * command shall always be removed, so just unblock posting async
	 * mailbox command and resume
	 */
	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* wake up worker thread to post asynchronous mailbox command */
	lpfc_worker_wake_up(phba);
}
/**
 * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 *
 * The function posts a mailbox to the port.  The mailbox is expected
 * to be completely filled in and ready for the port to operate on it.
 * This routine executes a synchronous completion operation on the
 * mailbox by polling for its completion.
 *
 * The caller must not be holding any locks when calling this routine.
 *
 * Returns:
 *	MBX_SUCCESS - mailbox posted successfully
 *	Any of the MBX error values.
 **/
5660 lpfc_sli4_post_sync_mbox(struct lpfc_hba
*phba
, LPFC_MBOXQ_t
*mboxq
)
5662 int rc
= MBX_SUCCESS
;
5663 unsigned long iflag
;
5665 uint32_t mcqe_status
;
5667 unsigned long timeout
;
5668 struct lpfc_sli
*psli
= &phba
->sli
;
5669 struct lpfc_mqe
*mb
= &mboxq
->u
.mqe
;
5670 struct lpfc_bmbx_create
*mbox_rgn
;
5671 struct dma_address
*dma_address
;
5672 struct lpfc_register bmbx_reg
;
5675 * Only one mailbox can be active to the bootstrap mailbox region
 * at a time and there is no queueing provided.
 **/
static int
lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc = MBX_SUCCESS;
	unsigned long iflag;
	uint32_t db_ready;
	uint32_t mcqe_status;
	uint32_t mbx_cmnd;
	unsigned long timeout;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_mqe *mb = &mboxq->u.mqe;
	struct lpfc_bmbx_create *mbox_rgn;
	struct dma_address *dma_address;
	struct lpfc_register bmbx_reg;

	spin_lock_irqsave(&phba->hbalock, iflag);
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2532 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_POLL);
		return MBXERR_ERROR;
	}
	/* The server grabs the token and owns it until release */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/*
	 * Initialize the bootstrap memory region to avoid stale data areas
	 * in the mailbox post. Then copy the caller's mailbox contents to
	 * the bmbx mailbox region.
	 */
	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
	lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
			      sizeof(struct lpfc_mqe));

	/* Post the high mailbox dma address to the port and wait for ready. */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);

	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
				   * 1000) + jiffies;
	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			msleep(2);

		if (time_after(jiffies, timeout)) {
			rc = MBXERR_ERROR;
			goto exit;
		}
	} while (!db_ready);

	/* Post the low mailbox dma address to the port. */
	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
				   * 1000) + jiffies;
	do {
		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
		if (!db_ready)
			msleep(2);

		if (time_after(jiffies, timeout)) {
			rc = MBXERR_ERROR;
			goto exit;
		}
	} while (!db_ready);

	/*
	 * Read the CQ to ensure the mailbox has completed.
	 * If so, update the mailbox status so that the upper layers
	 * can complete the request normally.
	 */
	lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
			      sizeof(struct lpfc_mqe));
	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
	lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
			      sizeof(struct lpfc_mcqe));
	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);

	/* Prefix the mailbox status with range x4000 to note SLI4 status. */
	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
		bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
		rc = MBXERR_ERROR;
	} else
		lpfc_sli4_swap_str(phba, mboxq);

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
			" x%x x%x CQ: x%x x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0,
			mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
			bf_get(lpfc_mqe_status, mb),
			mb->un.mb_words[0], mb->un.mb_words[1],
			mb->un.mb_words[2], mb->un.mb_words[3],
			mb->un.mb_words[4], mb->un.mb_words[5],
			mb->un.mb_words[6], mb->un.mb_words[7],
			mb->un.mb_words[8], mb->un.mb_words[9],
			mb->un.mb_words[10], mb->un.mb_words[11],
			mb->un.mb_words[12], mboxq->mcqe.word0,
			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
			mboxq->mcqe.trailer);
exit:
	/* We are holding the token, no lock needed for the release */
	spin_lock_irqsave(&phba->hbalock, iflag);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}
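
/*
 * The two ready-bit waits above follow a common polling pattern: compute an
 * absolute jiffies deadline, then re-read the register until the bit comes
 * up or the deadline passes. A minimal sketch of the pattern in isolation
 * (the register address, READY_BIT mask, and timeout value here are
 * hypothetical placeholders, not driver symbols):
 *
 *	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
 *	uint32_t ready;
 *	do {
 *		ready = readl(regaddr) & READY_BIT;
 *		if (!ready)
 *			msleep(2);
 *		if (time_after(jiffies, deadline))
 *			return -ETIMEDOUT;
 *	} while (!ready);
 */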
/**
 * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This function is called by discovery code and HBA management code to submit
 * a mailbox command to firmware with SLI-4 interface spec.
 *
 * Return codes: the caller owns the mailbox command after the return of the
 * function.
 **/
static int
lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
		       uint32_t flag)
{
	struct lpfc_sli *psli = &phba->sli;
	unsigned long iflags;
	int rc;

	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2544 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Detect polling mode and jump to a handler */
	if (!phba->sli4_hba.intr_enable) {
		if (flag == MBX_POLL)
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
		else
			rc = -EIO;
		if (rc != MBX_SUCCESS)
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):2541 Mailbox command x%x "
					"(x%x) cannot issue Data: x%x x%x\n",
					mboxq->vport ? mboxq->vport->vpi : 0,
					mboxq->u.mb.mbxCommand,
					lpfc_sli4_mbox_opcode_get(phba, mboxq),
					psli->sli_flag, flag);
		return rc;
	} else if (flag == MBX_POLL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
				"(%d):2542 Try to issue mailbox command "
				"x%x (x%x) synchronously ahead of async "
				"mailbox command queue: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		/* Try to block the asynchronous mailbox posting */
		rc = lpfc_sli4_async_mbox_block(phba);
		if (!rc) {
			/* Successfully blocked, now issue sync mbox cmd */
			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
			if (rc != MBX_SUCCESS)
				lpfc_printf_log(phba, KERN_ERR,
						LOG_MBOX | LOG_SLI,
						"(%d):2597 Mailbox command "
						"x%x (x%x) cannot issue "
						"Data: x%x x%x\n",
						mboxq->vport ?
						mboxq->vport->vpi : 0,
						mboxq->u.mb.mbxCommand,
						lpfc_sli4_mbox_opcode_get(phba,
								mboxq),
						psli->sli_flag, flag);
			/* Unblock the async mailbox posting afterward */
			lpfc_sli4_async_mbox_unblock(phba);
		}
		return rc;
	}

	/* Now, interrupt mode asynchronous mailbox command */
	rc = lpfc_mbox_cmd_check(phba, mboxq);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2543 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, flag);
		goto out_not_finished;
	}

	/* Put the mailbox command to the driver internal FIFO */
	psli->slistat.mbox_busy++;
	spin_lock_irqsave(&phba->hbalock, iflags);
	lpfc_mbox_put(phba, mboxq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0354 Mbox cmd issue - Enqueue Data: "
			"x%x (x%x) x%x x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
			lpfc_sli4_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state,
			psli->sli_flag, MBX_NOWAIT);
	/* Wake up worker thread to transport mailbox command from head */
	lpfc_worker_wake_up(phba);

	return MBX_BUSY;

out_not_finished:
	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
 * @phba: Pointer to HBA context object.
 *
 * This function is called by worker thread to send a mailbox command to
 * SLI4 HBA firmware.
 *
 **/
int
lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *mboxq;
	int rc = MBX_SUCCESS;
	unsigned long iflags;
	struct lpfc_mqe *mqe;
	uint32_t mbx_cmnd;

	/* Check interrupt mode before post async mailbox command */
	if (unlikely(!phba->sli4_hba.intr_enable))
		return MBX_NOT_FINISHED;

	/* Check for mailbox command service token */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_NOT_FINISHED;
	}
	if (unlikely(phba->sli.mbox_active)) {
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0384 There is pending active mailbox cmd\n");
		return MBX_NOT_FINISHED;
	}
	/* Take the mailbox command service token */
	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* Get the next mailbox command from head of queue */
	mboxq = lpfc_mbox_get(phba);

	/* If no more mailbox command waiting for post, we're done */
	if (!mboxq) {
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		return MBX_SUCCESS;
	}
	phba->sli.mbox_active = mboxq;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check device readiness for posting mailbox command */
	rc = lpfc_mbox_dev_check(phba);
	if (unlikely(rc))
		/* Driver clean routine will clean up pending mailbox */
		goto out_not_finished;

	/* Prepare the mbox command to be posted */
	mqe = &mboxq->u.mqe;
	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);

	/* Start timer for the mbox_tmo and log some mailbox post messages */
	mod_timer(&psli->mbox_tmo, (jiffies +
		  (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));

	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
			"(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
			"x%x x%x\n",
			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
			lpfc_sli4_mbox_opcode_get(phba, mboxq),
			phba->pport->port_state, psli->sli_flag);

	if (mbx_cmnd != MBX_HEARTBEAT) {
		if (mboxq->vport) {
			lpfc_debugfs_disc_trc(mboxq->vport,
				LPFC_DISC_TRC_MBOX_VPORT,
				"MBOX Send vport: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		} else {
			lpfc_debugfs_disc_trc(phba->pport,
				LPFC_DISC_TRC_MBOX,
				"MBOX Send: cmd:x%x mb:x%x x%x",
				mbx_cmnd, mqe->un.mb_words[0],
				mqe->un.mb_words[1]);
		}
	}
	psli->slistat.mbox_cmd++;

	/* Post the mailbox command to the port */
	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"(%d):2533 Mailbox command x%x (x%x) "
				"cannot issue Data: x%x x%x\n",
				mboxq->vport ? mboxq->vport->vpi : 0,
				mboxq->u.mb.mbxCommand,
				lpfc_sli4_mbox_opcode_get(phba, mboxq),
				psli->sli_flag, MBX_NOWAIT);
		goto out_not_finished;
	}

	return rc;

out_not_finished:
	spin_lock_irqsave(&phba->hbalock, iflags);
	mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
	__lpfc_mbox_cmpl_put(phba, mboxq);
	/* Release the token */
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	phba->sli.mbox_active = NULL;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return MBX_NOT_FINISHED;
}
/**
 * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
 * @phba: Pointer to HBA context object.
 * @pmbox: Pointer to mailbox object.
 * @flag: Flag indicating how the mailbox needs to be processed.
 *
 * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
 * the API jump table function pointer from the lpfc_hba struct.
 *
 * Return codes: the caller owns the mailbox command after the return of the
 * function.
 **/
int
lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
{
	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
}
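
/*
 * A sketch of a typical caller: allocate a mailbox from the mempool, build
 * the command, and submit it through the jump-table wrapper above. This
 * mirrors the shape of existing callers in the driver (lpfc_read_rev()
 * builds a READ_REV mailbox command); error handling is trimmed and the
 * surrounding context is assumed.
 *
 *	LPFC_MBOXQ_t *mbox;
 *	int rc;
 *
 *	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mbox)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, mbox);
 *	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 */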
/**
 * lpfc_mbox_api_table_setup - Set up mbox api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the mbox interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s3;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
		phba->lpfc_sli_handle_slow_ring_event =
				lpfc_sli_handle_slow_ring_event_s4;
		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1420 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * __lpfc_sli_ringtx_put - Add an iocb to the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held to add a command
 * iocb to the txq when SLI layer cannot submit the command iocb
 * to the ring.
 **/
void
__lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
	pring->txq_cnt++;
}
/**
 * lpfc_sli_next_iocb - Get the next iocb in the txq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @piocb: Pointer to address of newly added command iocb.
 *
 * This function is called with hbalock held before a new
 * iocb is submitted to the firmware. This function checks
 * txq to flush the iocbs in txq to Firmware before
 * submitting new iocbs to the Firmware.
 * If there are iocbs in the txq which need to be submitted
 * to firmware, lpfc_sli_next_iocb returns the first element
 * of the txq after dequeuing it from txq.
 * If there is no iocb in the txq then the function will return
 * *piocb and *piocb is set to NULL. Caller needs to check
 * *piocb to find if there are more commands in the txq.
 **/
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
/**
 * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
 * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
 * flag is turned on, the function returns IOCB_ERROR. When the link is down,
 * this function allows only iocbs for posting buffers. This function finds
 * next available slot in the command ring and posts the command to the
 * available slot and writes the port attention register to request HBA start
 * processing new iocb. If there is no slot available in the ring and
 * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
 * the function returns IOCB_BUSY.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding to the
 * txq.
 **/
static int
__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	if (piocb->iocb_cmpl && (!piocb->vport) &&
	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		lpfc_printf_log(phba, KERN_ERR,
				LOG_SLI | LOG_VPORT,
				"1807 IOCB x%x failed. No vport\n",
				piocb->iocb.ulpCommand);
		dump_stack();
		return IOCB_ERROR;
	}


	/* If the PCI channel is in offline state, do not post iocbs. */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return IOCB_ERROR;

	/* If HBA has a deferred error attention, fail the iocb. */
	if (unlikely(phba->hba_flag & DEFER_ERATT))
		return IOCB_ERROR;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding event.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
		goto iocb_busy;

	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			if (!(phba->sli.sli_flag & LPFC_MENLO_MAINT) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Rctl !=
					FC_RCTL_DD_UNSOL_CMD) ||
				(piocb->iocb.un.genreq64.w5.hcsw.Type !=
					MENLO_TRANSPORT_TYPE))

				goto iocb_busy;
			break;
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
		case CMD_CLOSE_XRI_CN:
		case CMD_CLOSE_XRI_CX:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
		goto iocb_busy;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		__lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
/**
 * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @sglq: Pointer to the scatter gather queue object.
 *
 * This routine converts the bpl or bde that is in the IOCB
 * to a sgl list for the sli4 hardware. The physical address
 * of the bpl/bde is converted back to a virtual address.
 * If the IOCB contains a BPL then the list of BDE's is
 * converted to sli4_sge's. If the IOCB contains a single
 * BDE then it is converted to a single sli_sge.
 * The IOCB is still in cpu endianness so the contents of
 * the bpl can be used without byte swapping.
 *
 * Returns valid XRI = Success, NO_XRI = Failure.
 **/
static uint16_t
lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
		struct lpfc_sglq *sglq)
{
	uint16_t xritag = NO_XRI;
	struct ulp_bde64 *bpl = NULL;
	struct ulp_bde64 bde;
	struct sli4_sge *sgl  = NULL;
	IOCB_t *icmd;
	int numBdes = 0;
	int i = 0;
	uint32_t offset = 0; /* accumulated offset in the sg request list */
	int inbound = 0; /* number of sg reply entries inbound from firmware */

	if (!piocbq || !sglq)
		return xritag;

	sgl  = (struct sli4_sge *)sglq->sgl;
	icmd = &piocbq->iocb;
	if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = icmd->un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		/* The addrHigh and addrLow fields within the IOCB
		 * have not been byteswapped yet so there is no
		 * need to swap them back.
		 */
		bpl  = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)piocbq->context3)->virt;

		if (!bpl)
			return xritag;

		for (i = 0; i < numBdes; i++) {
			/* Should already be byte swapped. */
			sgl->addr_hi = bpl->addrHigh;
			sgl->addr_lo = bpl->addrLow;

			sgl->word2 = le32_to_cpu(sgl->word2);
			if ((i+1) == numBdes)
				bf_set(lpfc_sli4_sge_last, sgl, 1);
			else
				bf_set(lpfc_sli4_sge_last, sgl, 0);
			/* swap the size field back to the cpu so we
			 * can assign it to the sgl.
			 */
			bde.tus.w = le32_to_cpu(bpl->tus.w);
			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
			/* The offsets in the sgl need to be accumulated
			 * separately for the request and reply lists.
			 * The request is always first, the reply follows.
			 */
			if (piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) {
				/* add up the reply sg entries */
				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
					inbound++;
				/* first inbound? reset the offset */
				if (inbound == 1)
					offset = 0;
				bf_set(lpfc_sli4_sge_offset, sgl, offset);
				offset += bde.tus.f.bdeSize;
			}
			sgl->word2 = cpu_to_le32(sgl->word2);
			bpl++;
			sgl++;
		}
	} else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
		/* The addrHigh and addrLow fields of the BDE have not
		 * been byteswapped yet so they need to be swapped
		 * before putting them in the sgl.
		 */
		sgl->addr_hi =
			cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
		sgl->addr_lo =
			cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
		bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len =
			cpu_to_le32(icmd->un.genreq64.bdl.bdeSize);
	}
	return sglq->sli4_xritag;
}
/**
 * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
 * @phba: Pointer to HBA context object.
 *
 * This routine performs a round-robin SCSI command to SLI4 FCP WQ index
 * distribution. This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
 * held.
 *
 * Return: index into SLI4 fast-path FCP queue index.
 **/
static inline uint32_t
lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
{
	++phba->fcp_qidx;
	if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
		phba->fcp_qidx = 0;

	return phba->fcp_qidx;
}
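
/*
 * Example: with cfg_fcp_wq_count == 4 and fcp_qidx starting at 0, successive
 * calls return 1, 2, 3, 0, 1, ... -- the pre-increment hands out the next WQ
 * index on each call and wraps back to 0 after the last one. The wrap in
 * isolation:
 *
 *	idx = (idx + 1) % wq_count;
 */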
/**
 * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to command iocb.
 * @wqe: Pointer to the work queue entry.
 *
 * This routine converts the iocb command to its Work Queue Entry
 * equivalent. The wqe pointer should not have any fields set when
 * this routine is called because it will memcpy over them.
 * This routine does not set the CQ_ID or the WQEC bits in the
 * wqe.
 *
 * Returns: 0 = Success, IOCB_ERROR = Failure.
 **/
static int
lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
		union lpfc_wqe *wqe)
{
	uint32_t xmit_len = 0, total_len = 0;
	uint8_t ct = 0;
	uint32_t fip;
	uint32_t abort_tag;
	uint8_t command_type = ELS_COMMAND_NON_FIP;
	uint8_t cmnd;
	uint16_t xritag;
	uint16_t abrt_iotag;
	struct lpfc_iocbq *abrtiocbq;
	struct ulp_bde64 *bpl = NULL;
	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
	int numBdes, i;
	struct ulp_bde64 bde;

	fip = phba->hba_flag & HBA_FIP_SUPPORT;
	/* The fcp commands will set command type */
	if (iocbq->iocb_flag &  LPFC_IO_FCP)
		command_type = FCP_COMMAND;
	else if (fip && (iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK))
		command_type = ELS_COMMAND_FIP;
	else
		command_type = ELS_COMMAND_NON_FIP;

	/* Some of the fields are in the right position already */
	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
	abort_tag = (uint32_t) iocbq->iotag;
	xritag = iocbq->sli4_xritag;
	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
	/* words0-2 bpl convert bde */
	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
				sizeof(struct ulp_bde64);
		bpl  = (struct ulp_bde64 *)
			((struct lpfc_dmabuf *)iocbq->context3)->virt;
		if (!bpl)
			return IOCB_ERROR;

		/* Should already be byte swapped. */
		wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
		wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
		/* swap the size field back to the cpu so we
		 * can assign it to the sgl.
		 */
		wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
		xmit_len = wqe->generic.bde.tus.f.bdeSize;
		total_len = 0;
		for (i = 0; i < numBdes; i++) {
			bde.tus.w  = le32_to_cpu(bpl[i].tus.w);
			total_len += bde.tus.f.bdeSize;
		}
	} else
		xmit_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;

	iocbq->iocb.ulpIoTag = iocbq->iotag;
	cmnd = iocbq->iocb.ulpCommand;

	switch (iocbq->iocb.ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		if (!iocbq->iocb.ulpLe) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2007 Only Limited Edition cmd Format"
				" supported 0x%x\n",
				iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		wqe->els_req.payload_len = xmit_len;
		/* Els_request64 has a TMO */
		bf_set(wqe_tmo, &wqe->els_req.wqe_com,
			iocbq->iocb.ulpTimeout);
		/* Need a VF for word 4 set the vf bit*/
		bf_set(els_req64_vf, &wqe->els_req, 0);
		/* And a VFID for word 12 */
		bf_set(els_req64_vfid, &wqe->els_req, 0);
		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
		/* CCP CCPE PV PRI in word10 were set in the memcpy */
		if (command_type == ELS_COMMAND_FIP) {
			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
					>> LPFC_FIP_ELS_ID_SHIFT);
		}
		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
		break;
	case CMD_XMIT_SEQUENCE64_CX:
		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.un.ulpWord[3]);
		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
		       iocbq->iocb.ulpContext);
		/* The entire sequence is transmitted for this IOCB */
		xmit_len = total_len;
		cmnd = CMD_XMIT_SEQUENCE64_CR;
	case CMD_XMIT_SEQUENCE64_CR:
		/* word3 iocb=io_tag32 wqe=reserved */
		wqe->xmit_sequence.rsvd3 = 0;
		/* word4 relative_offset memcpy */
		/* word5 r_ctl/df_ctl memcpy */
		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
		wqe->xmit_sequence.xmit_len = xmit_len;
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_BCAST64_CN:
		/* word3 iocb=iotag32 wqe=seq_payload_len */
		wqe->xmit_bcast64.seq_payload_len = xmit_len;
		/* word4 iocb=rsvd wqe=rsvd */
		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
		break;
	case CMD_FCP_IWRITE64_CR:
		command_type = FCP_COMMAND_DATA_OUT;
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		wqe->fcp_iwrite.payload_offset_len =
			xmit_len + sizeof(struct fcp_rsp);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
		break;
	case CMD_FCP_IREAD64_CR:
		/* word3 iocb=iotag wqe=payload_offset_len */
		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
		wqe->fcp_iread.payload_offset_len =
			xmit_len + sizeof(struct fcp_rsp);
		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
		       iocbq->iocb.ulpFCP2Rcvy);
		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
		       LPFC_WQE_LENLOC_WORD4);
		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
		break;
	case CMD_FCP_ICMND64_CR:
		/* word3 iocb=IO_TAG wqe=reserved */
		wqe->fcp_icmd.rsrvd3 = 0;
		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
		/* Always open the exchange */
		bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
		break;
	case CMD_GEN_REQUEST64_CR:
		/* For this command calculate the xmit length of the
		 * request bde.
		 */
		xmit_len = 0;
		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
			sizeof(struct ulp_bde64);
		for (i = 0; i < numBdes; i++) {
			bde.tus.w = le32_to_cpu(bpl[i].tus.w);
			if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
				break;
			xmit_len += bde.tus.f.bdeSize;
		}
		/* word3 iocb=IO_TAG wqe=request_payload_len */
		wqe->gen_req.request_payload_len = xmit_len;
		/* word4 iocb=parameter wqe=relative_offset memcpy */
		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
		/* word6 context tag copied in memcpy */
		if (iocbq->iocb.ulpCt_h || iocbq->iocb.ulpCt_l) {
			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2015 Invalid CT %x command 0x%x\n",
				ct, iocbq->iocb.ulpCommand);
			return IOCB_ERROR;
		}
		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_XMIT_ELS_RSP64_CX:
		/* words0-2 BDE memcpy */
		/* word3 iocb=iotag32 wqe=response_payload_len */
		wqe->xmit_els_rsp.response_payload_len = xmit_len;
		/* word4 iocb=did wqe=rsvd. */
		wqe->xmit_els_rsp.rsvd4 = 0;
		/* word5 iocb=rsvd wqe=did */
		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
			 iocbq->iocb.un.elsreq64.remoteID);
		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
		       iocbq->iocb.ulpContext);
		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
			       iocbq->vport->vpi + phba->vpi_base);
		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
		command_type = OTHER_COMMAND;
		break;
	case CMD_CLOSE_XRI_CN:
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
		/* words 0-2 memcpy should be 0 reserved */
		/* port will send abts */
		abrt_iotag = iocbq->iocb.un.acxri.abortContextTag;
		if (abrt_iotag != 0 && abrt_iotag <= phba->sli.last_iotag) {
			abrtiocbq = phba->sli.iocbq_lookup[abrt_iotag];
			fip = abrtiocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK;
		} else
			fip = 0;

		if ((iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN) || fip)
			/*
			 * The link is down, or the command was ELS_FIP
			 * so the fw does not need to send abts
			 * on the wire.
			 */
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
		else
			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
		wqe->abort_cmd.rsrvd5 = 0;
		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
		/*
		 * The abort handler will send us CMD_ABORT_XRI_CN or
		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
		 */
		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		cmnd = CMD_ABORT_XRI_CX;
		command_type = OTHER_COMMAND;
		xritag = 0;
		break;
	case CMD_XMIT_BLS_RSP64_CX:
		/* As BLS ABTS RSP WQE is very different from other WQEs,
		 * we re-construct this WQE here based on information in
		 * iocbq from scratch.
		 */
		memset(wqe, 0, sizeof(union lpfc_wqe));
		/* OX_ID is invariable to who sent ABTS to CT exchange */
		bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp,
		       bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp));
		if (bf_get(lpfc_abts_orig, &iocbq->iocb.un.bls_rsp) ==
		    LPFC_ABTS_UNSOL_INT) {
			/* ABTS sent by initiator to CT exchange, the
			 * RX_ID field will be filled with the newly
			 * allocated responder XRI.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       iocbq->sli4_xritag);
		} else {
			/* ABTS sent by responder to CT exchange, the
			 * RX_ID field will be filled with the responder
			 * RX_ID from ABTS.
			 */
			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_abts_rxid, &iocbq->iocb.un.bls_rsp));
		}
		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
		       iocbq->iocb.ulpContext);
		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
		       LPFC_WQE_LENLOC_NONE);
		/* Overwrite the pre-set command type with OTHER_COMMAND */
		command_type = OTHER_COMMAND;
		if (iocbq->iocb.un.xseq64.w5.hcsw.Rctl == FC_RCTL_BA_RJT) {
			bf_set(xmit_bls_rsp64_rjt_vspec, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_vndr_code, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_expc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_expln, &iocbq->iocb.un.bls_rsp));
			bf_set(xmit_bls_rsp64_rjt_rsnc, &wqe->xmit_bls_rsp,
			       bf_get(lpfc_rsn_code, &iocbq->iocb.un.bls_rsp));
		}

		break;
	case CMD_XRI_ABORTED_CX:
	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
	case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2014 Invalid command 0x%x\n",
				iocbq->iocb.ulpCommand);
		return IOCB_ERROR;
	}

	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
	wqe->generic.wqe_com.abort_tag = abort_tag;
	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	return 0;
}
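
/*
 * The bf_set()/bf_get() accessors used throughout the conversion above are
 * read-modify-write helpers for named bit-fields inside a WQE word. A
 * simplified sketch of what such a macro pair amounts to (the real macros
 * take a field descriptor carrying word offset, shift, and mask constants
 * defined in the hardware headers; FIELD_SHIFT/FIELD_MASK below are
 * placeholders):
 *
 *	word &= ~(FIELD_MASK << FIELD_SHIFT);
 *	word |= (value & FIELD_MASK) << FIELD_SHIFT;	// bf_set
 *	value = (word >> FIELD_SHIFT) & FIELD_MASK;	// bf_get
 */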
/**
 * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
 * an iocb command to an HBA with SLI-4 interface spec.
 *
 * This function is called with hbalock held. The function will return success
 * after it successfully submits the iocb to firmware or after adding to the
 * txq.
 **/
static int
__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
			 struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;
	struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];

	if (piocb->sli4_xritag == NO_XRI) {
		if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN ||
		    piocb->iocb.ulpCommand == CMD_XMIT_BLS_RSP64_CX)
			sglq = NULL;
		else {
			if (pring->txq_cnt) {
				if (!(flag & SLI_IOCB_RET_IOCB)) {
					__lpfc_sli_ringtx_put(phba,
						pring, piocb);
					return IOCB_SUCCESS;
				} else {
					return IOCB_BUSY;
				}
			} else {
				sglq = __lpfc_sli_get_sglq(phba, piocb);
				if (!sglq) {
					if (!(flag & SLI_IOCB_RET_IOCB)) {
						__lpfc_sli_ringtx_put(phba,
								pring,
								piocb);
						return IOCB_SUCCESS;
					} else
						return IOCB_BUSY;
				}
			}
		}
	} else if (piocb->iocb_flag &  LPFC_IO_FCP) {
		sglq = NULL; /* These IO's already have an XRI and
			      * a mapped sgl.
			      */
	} else {
		/* This is a continuation of a command (CX) so this
		 * sglq is on the active list
		 */
		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
		if (!sglq)
			return IOCB_ERROR;
	}

	if (sglq) {
		piocb->sli4_xritag = sglq->sli4_xritag;

		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocb, sglq))
			return IOCB_ERROR;
	}

	if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
		return IOCB_ERROR;

	if ((piocb->iocb_flag & LPFC_IO_FCP) ||
		(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
		/*
		 * For FCP command IOCB, get a new WQ index to distribute
		 * WQE across the WQs. On the other hand, for abort IOCB,
		 * it carries the same WQ index to the original command
		 * IOCB.
		 */
		if (piocb->iocb_flag & LPFC_IO_FCP)
			piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
				     &wqe))
			return IOCB_ERROR;
	} else {
		if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			return IOCB_ERROR;
	}
	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);

	return 0;
}
/**
 * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
 *
 * This routine wraps the actual lockless version for issuing IOCB function
 * pointer from the lpfc_hba struct.
 *
 * Return codes:
 *	IOCB_ERROR - Error
 *	IOCB_SUCCESS - Success
 *	IOCB_BUSY - Busy
 **/
int
__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		struct lpfc_iocbq *piocb, uint32_t flag)
{
	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
}
/**
 * lpfc_sli_api_table_setup - Set up sli api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the SLI interface API function jump table in @phba
 * struct.
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{

	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1419 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
	return 0;
}
/**
 * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
 * @phba: Pointer to HBA context object.
 * @ring_number: SLI ring number to issue iocb on.
 * @piocb: Pointer to command iocb.
 * @flag: Flag indicating if this command can be put into txq.
 *
 * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
 * function. This function gets the hbalock and calls
 * __lpfc_sli_issue_iocb function and will return the error returned
 * by __lpfc_sli_issue_iocb function. This wrapper is used by
 * functions which do not hold hbalock.
 **/
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	unsigned long iflags;
	int rc;

	spin_lock_irqsave(&phba->hbalock, iflags);
	rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return rc;
}
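
/*
 * Sketch of a lockless caller using the wrapper above (a hypothetical ELS
 * iocb "elsiocb" prepared elsewhere); callers already holding hbalock must
 * call __lpfc_sli_issue_iocb() directly instead:
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, elsiocb);
 */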
/**
 * lpfc_extra_ring_setup - Extra ring setup function
 * @phba: Pointer to HBA context object.
 *
 * This function is called while the driver attaches to the
 * HBA to set up the extra ring. The extra ring is used
 * only when the driver needs to support target mode functionality
 * or IP over FC functionalities.
 *
 * This function is called with no lock held.
 **/
static int
lpfc_extra_ring_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;

	/* Adjust cmd/rsp ring iocb entries more evenly */

	/* Take some away from the FCP ring */
	pring = &psli->ring[psli->fcp_ring];
	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* and give them to the extra ring */
	pring = &psli->ring[psli->extra_ring];

	pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
	pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
	pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
	pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;

	/* Setup default profile for this ring */
	pring->iotag_max = 4096;
	pring->num_mask = 1;
	pring->prt[0].profile = 0;	/* Mask 0 */
	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
	pring->prt[0].type = phba->cfg_multi_ring_type;
	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
	return 0;
}
/**
 * lpfc_sli_async_event_handler - ASYNC iocb handler function
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @iocbq: Pointer to iocb object.
 *
 * This function is called by the slow ring event handler
 * function when there is an ASYNC event iocb in the ring.
 * This function is called with no lock held.
 * Currently this function handles only temperature related
 * ASYNC events. The function decodes the temperature sensor
 * event message and posts events for the management applications.
 **/
static void
lpfc_sli_async_event_handler(struct lpfc_hba * phba,
	struct lpfc_sli_ring * pring, struct lpfc_iocbq * iocbq)
{
	IOCB_t *icmd;
	uint16_t evt_code;
	uint16_t temp;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;
	uint32_t *iocb_w;

	icmd = &iocbq->iocb;
	evt_code = icmd->un.asyncstat.evt_code;
	temp = icmd->ulpContext;

	if ((evt_code != ASYNC_TEMP_WARN) &&
		(evt_code != ASYNC_TEMP_SAFE)) {
		iocb_w = (uint32_t *) icmd;
		lpfc_printf_log(phba,
			KERN_ERR,
			LOG_SLI,
			"0346 Ring %d handler: unexpected ASYNC_STATUS"
			" evt_code 0x%x\n"
			"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
			"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
			"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
			pring->ringno,
			icmd->un.asyncstat.evt_code,
			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);

		return;
	}
	temp_event_data.data = (uint32_t)temp;
	temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
	if (evt_code == ASYNC_TEMP_WARN) {
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_TEMP,
				"0347 Adapter is very hot, please take "
				"corrective action. temperature : %d Celsius\n",
				temp);
	}
	if (evt_code == ASYNC_TEMP_SAFE) {
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_TEMP,
				"0340 Adapter temperature is OK now. "
				"temperature : %d Celsius\n",
				temp);
	}

	/* Send temperature change event to applications */
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
		sizeof(temp_event_data), (char *) &temp_event_data,
		LPFC_NL_VENDOR_ID);

}
/**
 * lpfc_sli_setup - SLI ring setup function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_setup sets up rings of the SLI interface with
 * number of iocbs per ring and iotags. This function is
 * called while the driver attaches to the HBA and before the
 * interrupts are enabled. So there is no need for locking.
 *
 * This function always returns 0.
 **/
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocbsize = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_CONFIGURED_RINGS;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->extra_ring = LPFC_EXTRA_RING;

	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->iotag_max = phba->cfg_hba_queue_depth;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->sizeCiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_CMD_SIZE :
							SLI2_IOCB_CMD_SIZE;
			pring->sizeRiocb = (phba->sli_rev == 3) ?
							SLI3_IOCB_RSP_SIZE :
							SLI2_IOCB_RSP_SIZE;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->lpfc_sli_rcv_async_status =
				lpfc_sli_async_event_handler;
			pring->num_mask = LPFC_MAX_RING_MASK;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
			pring->prt[0].type = FC_TYPE_ELS;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_RCTL_ELS_REP;
			pring->prt[1].type = FC_TYPE_ELS;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_TYPE_CT;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_TYPE_CT;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			/* abort unsolicited sequence */
			pring->prt[4].profile = 0;	/* Mask 4 */
			pring->prt[4].rctl = FC_RCTL_BA_ABTS;
			pring->prt[4].type = FC_TYPE_BLS;
			pring->prt[4].lpfc_sli_rcv_unsol_event =
				lpfc_sli4_ct_abort_unsol_event;
			break;
		}
		totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
				(pring->numRiocb * pring->sizeRiocb);
	}
	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
		       "SLI2 SLIM Data: x%x x%lx\n",
		       phba->brd_no, totiocbsize,
		       (unsigned long) MAX_SLIM_IOCB_SIZE);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
/**
 * lpfc_sli_queue_setup - Queue initialization function
 * @phba: Pointer to HBA context object.
 *
 * lpfc_sli_queue_setup sets up mailbox queues and iocb queues for each
 * ring. This function also initializes ring indices of each ring.
 * This function is called during the initialization of the SLI
 * interface of an HBA.
 * This function is called with no lock held and always returns
 * 1.
 **/
int
lpfc_sli_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i;

	psli = &phba->sli;
	spin_lock_irq(&phba->hbalock);
	INIT_LIST_HEAD(&psli->mboxq);
	INIT_LIST_HEAD(&psli->mboxq_cmpl);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->next_cmdidx  = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
		INIT_LIST_HEAD(&pring->postbufq);
	}
	spin_unlock_irq(&phba->hbalock);
	return 1;
}
/**
 * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
 * @phba: Pointer to HBA context object.
 *
 * This routine flushes the mailbox command subsystem. It will unconditionally
 * flush all the mailbox commands in the three possible stages in the mailbox
 * command sub-system: pending mailbox command queue; the outstanding mailbox
 * command; and completed mailbox command queue. It is caller's responsibility
 * to make sure that the driver is in the proper state to flush the mailbox
 * command sub-system. Namely, the posting of mailbox commands into the
 * pending mailbox command queue from the various clients must be stopped;
 * either the HBA is in a state that it will never work on the outstanding
 * mailbox command (such as in EEH or ERATT conditions) or the outstanding
 * mailbox command has been completed.
 **/
static void
lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	unsigned long iflag;

	/* Flush all the mailbox commands in the mbox system */
	spin_lock_irqsave(&phba->hbalock, iflag);
	/* The pending mailbox command queue */
	list_splice_init(&phba->sli.mboxq, &completions);
	/* The outstanding active mailbox command */
	if (psli->mbox_active) {
		list_add_tail(&psli->mbox_active->list, &completions);
		psli->mbox_active = NULL;
		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}
	/* The completed mailbox command queue */
	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
	while (!list_empty(&completions)) {
		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	}
}
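
/*
 * The flush above uses the usual two-phase teardown for lists shared with
 * interrupt context: splice everything onto a private list under the lock,
 * then walk the private list with the lock dropped. The pattern in
 * isolation (names here are placeholders):
 *
 *	LIST_HEAD(local);
 *	spin_lock_irqsave(&lock, flags);
 *	list_splice_init(&shared, &local);
 *	spin_unlock_irqrestore(&lock, flags);
 *	// complete or free each entry on "local" without holding the lock
 */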
/**
 * lpfc_sli_host_down - Vport cleanup function
 * @vport: Pointer to virtual port object.
 *
 * lpfc_sli_host_down is called to clean up the resources
 * associated with a vport before destroying virtual
 * port data structures.
 * This function does the following operations:
 * - Free discovery resources associated with this virtual
 *   port.
 * - Free iocbs associated with this virtual port in
 *   the txq.
 * - Send abort for all iocb commands associated with this
 *   vport in txcmplq.
 *
 * This function is called with no lock held and always returns 1.
 **/
int
lpfc_sli_host_down(struct lpfc_vport *vport)
{
	LIST_HEAD(completions);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	int i;
	unsigned long flags = 0;
	uint16_t prev_pring_flag;

	lpfc_cleanup_discovery_resources(vport);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		prev_pring_flag = pring->flag;
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}
		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			if (iocb->vport != vport)
				continue;
			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}

		/* Next issue ABTS for everything on the txcmplq */
		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
									list) {
			if (iocb->vport != vport)
				continue;
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}

		pring->flag = prev_pring_flag;
	}

	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
	return 1;
}
/**
 * lpfc_sli_hba_down - Resource cleanup function for the HBA
 * @phba: Pointer to HBA context object.
 *
 * This function cleans up all iocb, buffers, mailbox commands
 * while shutting down the HBA. This function is called with no
 * lock held and always returns 1.
 * This function does the following to cleanup driver resources:
 * - Free discovery resources for each virtual port
 * - Cleanup any pending fabric iocbs
 * - Iterate through the iocb txq and free each entry
 *   in the list.
 * - Free up any buffer posted to the HBA
 * - Free mailbox commands in the mailbox queue.
 **/
int
lpfc_sli_hba_down(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *buf_ptr;
	unsigned long flags = 0;
	int i;

	/* Shutdown the mailbox command sub-system */
	lpfc_sli_mbox_sys_shutdown(phba);

	lpfc_hba_down_prep(phba);

	lpfc_fabric_abort_hba(phba);

	spin_lock_irqsave(&phba->hbalock, flags);
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		/* Only slow rings */
		if (pring->ringno == LPFC_ELS_RING) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Set the lpfc data pending flag */
			set_bit(LPFC_DATA_READY, &phba->data_flags);
		}

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		list_splice_init(&pring->txq, &completions);
		pring->txq_cnt = 0;

	}
	spin_unlock_irqrestore(&phba->hbalock, flags);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);

	spin_lock_irqsave(&phba->hbalock, flags);
	list_splice_init(&phba->elsbuf, &completions);
	phba->elsbuf_cnt = 0;
	phba->elsbuf_prev_cnt = 0;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	while (!list_empty(&completions)) {
		list_remove_head(&completions, buf_ptr,
			struct lpfc_dmabuf, list);
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	return 1;
}
/**
 * lpfc_sli_pcimem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *
 * This function is used for copying data between driver memory
 * and the SLI memory. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
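
/*
 * le32_to_cpu() is a no-op on little-endian hosts and a byte swap on
 * big-endian hosts, so the copy above normalizes little-endian SLI words
 * to host order word by word. For example, copying a single word:
 *
 *	uint32_t sli_word = 0x11223344;		// sample source word
 *	uint32_t host_word;
 *
 *	lpfc_sli_pcimem_bcopy(&sli_word, &host_word, sizeof(uint32_t));
 *	// host_word == 0x11223344 on LE hosts, 0x44332211 on BE hosts
 */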
/**
 * lpfc_sli_bemem_bcopy - SLI memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *
 * This function is used for copying data between a data structure
 * with big endian representation to local endianness.
 * This function can be called with or without lock.
 **/
void
lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
		ldata = *src;
		ldata = be32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
/**
 * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @mp: Pointer to driver buffer object.
 *
 * This function is called with no lock held.
 * It always returns zero after adding the buffer to the postbufq
 * buffer list.
 **/
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 struct lpfc_dmabuf *mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	spin_lock_irq(&phba->hbalock);
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	spin_unlock_irq(&phba->hbalock);
	return 0;
}
/**
 * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
 * @phba: Pointer to HBA context object.
 *
 * When HBQ is enabled, buffers are searched based on tags. This function
 * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
 * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
 * does not conflict with tags of buffer posted for unsolicited events.
 * The function returns the allocated tag. The function is called with
 * no lock held.
 **/
uint32_t
lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->buffer_tag_count++;
	/*
	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
	 * a tag assigned by HBQ.
	 */
	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
	spin_unlock_irq(&phba->hbalock);
	return phba->buffer_tag_count;
}
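
/*
 * QUE_BUFTAG_BIT forces a high bit on so driver-assigned tags can never
 * collide with HBQ-assigned ones. E.g., assuming QUE_BUFTAG_BIT is
 * (1 << 31), a counter value of 0x12 yields the tag 0x80000012:
 *
 *	tag = count | QUE_BUFTAG_BIT;
 */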
/**
 * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @tag: Buffer tag.
 *
 * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
 * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
 * iocb is posted to the response ring with the tag of the buffer.
 * This function searches the pring->postbufq list using the tag
 * to find buffer associated with CMD_IOCB_RET_XRI64_CX
 * iocb. If the buffer is found then lpfc_dmabuf object of the
 * buffer is returned to the caller else NULL is returned.
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			uint32_t tag)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on tag */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->buffer_tag == tag) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			spin_unlock_irq(&phba->hbalock);
			return mp;
		}
	}

	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0402 Cannot find virtual addr for buffer tag on "
			"ring %d Data x%lx x%p x%p x%x\n",
			pring->ringno, (unsigned long) tag,
			slp->next, slp->prev, pring->postbufq_cnt);

	return NULL;
}

/**
 * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @phys: DMA address of the buffer.
 *
 * This function searches the buffer list using the dma_address
 * of unsolicited event to find the driver's lpfc_dmabuf object
 * corresponding to the dma_address. The function returns the
 * lpfc_dmabuf object if a buffer is found else it returns NULL.
 * This function is called by the ct and els unsolicited event
 * handlers to get the buffer associated with the unsolicited
 * event.
 *
 * This function is called with no lock held.
 **/
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                         dma_addr_t phys)
{
        struct lpfc_dmabuf *mp, *next_mp;
        struct list_head *slp = &pring->postbufq;

        /* Search postbufq, from the beginning, looking for a match on phys */
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
                if (mp->phys == phys) {
                        list_del_init(&mp->list);
                        pring->postbufq_cnt--;
                        spin_unlock_irq(&phba->hbalock);
                        return mp;
                }
        }

        spin_unlock_irq(&phba->hbalock);
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                        "0410 Cannot find virtual addr for mapped buf on "
                        "ring %d Data x%llx x%p x%p x%x\n",
                        pring->ringno, (unsigned long long)phys,
                        slp->next, slp->prev, pring->postbufq_cnt);
        return NULL;
}
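
/*
 * Usage sketch (illustrative only, compiled out): a DMA buffer is parked on
 * the postbufq with lpfc_sli_ringpostbuf_put() before being handed to the
 * port; when the unsolicited event arrives carrying the buffer's DMA
 * address, lpfc_sli_ringpostbuf_get() claims it back. The helper name is
 * hypothetical.
 */
#if 0	/* example only */
static struct lpfc_dmabuf *
example_claim_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           dma_addr_t phys)
{
        /* Returns NULL (and logs message 0410) if nothing matches @phys */
        return lpfc_sli_ringpostbuf_get(phba, pring, phys);
}
#endif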

/**
 * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * This function is the completion handler for the abort iocbs for
 * ELS commands. This function is called from the ELS ring event
 * handler with no lock held. This function frees memory resources
 * associated with the abort iocb.
 **/
static void
lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        struct lpfc_iocbq *rspiocb)
{
        IOCB_t *irsp = &rspiocb->iocb;
        uint16_t abort_iotag, abort_context;
        struct lpfc_iocbq *abort_iocb = NULL;
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

        if (irsp->ulpStatus) {
                abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
                abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;

                spin_lock_irq(&phba->hbalock);
                if (phba->sli_rev < LPFC_SLI_REV4) {
                        if (abort_iotag != 0 &&
                                abort_iotag <= phba->sli.last_iotag)
                                abort_iocb =
                                        phba->sli.iocbq_lookup[abort_iotag];
                } else
                        /* For sli4 the abort_tag is the XRI,
                         * so the abort routine puts the iotag of the iocb
                         * being aborted in the context field of the abort
                         * IOCB.
                         */
                        abort_iocb = phba->sli.iocbq_lookup[abort_context];

                /*
                 * If the iocb is not found in Firmware queue the iocb
                 * might have completed already. Do not free it again.
                 */
                if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
                        if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
                                spin_unlock_irq(&phba->hbalock);
                                lpfc_sli_release_iocbq(phba, cmdiocb);
                                return;
                        }
                        /* For SLI4 the ulpContext field for abort IOCB
                         * holds the iotag of the IOCB being aborted so
                         * the local abort_context needs to be reset to
                         * match the aborted IOCBs ulpContext.
                         */
                        if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
                                abort_context = abort_iocb->iocb.ulpContext;
                }

                lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
                                "0327 Cannot abort els iocb %p "
                                "with tag %x context %x, abort status %x, "
                                "abort code %x\n",
                                abort_iocb, abort_iotag, abort_context,
                                irsp->ulpStatus, irsp->un.ulpWord[4]);
                /*
                 * make sure we have the right iocbq before taking it
                 * off the txcmplq and try to call completion routine.
                 */
                if (!abort_iocb ||
                    abort_iocb->iocb.ulpContext != abort_context ||
                    (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
                        spin_unlock_irq(&phba->hbalock);
                else if (phba->sli_rev < LPFC_SLI_REV4) {
                        /*
                         * leave the SLI4 aborted command on the txcmplq
                         * list and the command complete WCQE's XB bit
                         * will tell whether the SGL (XRI) can be released
                         * immediately or to the aborted SGL list for the
                         * following abort XRI from the HBA.
                         */
                        list_del_init(&abort_iocb->list);
                        if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
                                abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
                                pring->txcmplq_cnt--;
                        }

                        /* Firmware could still be in progress of DMAing
                         * payload, so don't free data buffer till after
                         * a hbeat.
                         */
                        abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
                        abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
                        spin_unlock_irq(&phba->hbalock);

                        abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
                        abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
                        (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
                } else
                        spin_unlock_irq(&phba->hbalock);
        }

        lpfc_sli_release_iocbq(phba, cmdiocb);
        return;
}

/**
 * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to driver command iocb object.
 * @rspiocb: Pointer to driver response iocb object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for ELS commands
 * which are aborted. The function frees memory resources used for
 * the aborted ELS commands.
 **/
static void
lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                     struct lpfc_iocbq *rspiocb)
{
        IOCB_t *irsp = &rspiocb->iocb;

        /* ELS cmd tag <ulpIoTag> completes */
        lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
                        "0139 Ignoring ELS cmd tag x%x completion Data: "
                        "x%x x%x x%x\n",
                        irsp->ulpIoTag, irsp->ulpStatus,
                        irsp->un.ulpWord[4], irsp->ulpTimeout);
        if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
                lpfc_ct_free_iocb(phba, cmdiocb);
        else
                lpfc_els_free_iocb(phba, cmdiocb);
        return;
}

/**
 * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb down to
 * the port. Other than the case the outstanding command iocb is an abort
 * request, this function issues abort out unconditionally. This function is
 * called with hbalock held. The function returns 0 when it fails due to
 * memory allocation failure or when the command iocb is an abort request.
 **/
static int
lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           struct lpfc_iocbq *cmdiocb)
{
        struct lpfc_vport *vport = cmdiocb->vport;
        struct lpfc_iocbq *abtsiocbp;
        IOCB_t *icmd = NULL;
        IOCB_t *iabt = NULL;
        int retval;

        /*
         * There are certain command types we don't want to abort. And we
         * don't want to abort commands that are already in the process of
         * being aborted.
         */
        icmd = &cmdiocb->iocb;
        if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
            icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
            (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
                return 0;

        /* issue ABTS for this IOCB based on iotag */
        abtsiocbp = __lpfc_sli_get_iocbq(phba);
        if (abtsiocbp == NULL)
                return 0;

        /* This signals the response to set the correct status
         * before calling the completion handler
         */
        cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;

        iabt = &abtsiocbp->iocb;
        iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
        iabt->un.acxri.abortContextTag = icmd->ulpContext;
        if (phba->sli_rev == LPFC_SLI_REV4) {
                iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
                iabt->un.acxri.abortContextTag = cmdiocb->iotag;
        } else
                iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
        iabt->ulpLe = 1;
        iabt->ulpClass = icmd->ulpClass;

        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
        abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
        if (cmdiocb->iocb_flag & LPFC_IO_FCP)
                abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;

        if (phba->link_state >= LPFC_LINK_UP)
                iabt->ulpCommand = CMD_ABORT_XRI_CN;
        else
                iabt->ulpCommand = CMD_CLOSE_XRI_CN;

        abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
                         "0339 Abort xri x%x, original iotag x%x, "
                         "abort cmd iotag x%x\n",
                         iabt->un.acxri.abortIoTag,
                         iabt->un.acxri.abortContextTag,
                         abtsiocbp->iotag);
        retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);

        if (retval)
                __lpfc_sli_release_iocbq(phba, abtsiocbp);

        /*
         * Caller to this routine should check for IOCB_ERROR
         * and handle it properly. This routine no longer removes
         * iocb off txcmplq and call compl in case of IOCB_ERROR.
         */
        return retval;
}

/**
 * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 * @cmdiocb: Pointer to driver command iocb object.
 *
 * This function issues an abort iocb for the provided command iocb. In case
 * of unloading, the abort iocb will not be issued to commands on the ELS
 * ring. Instead, the callback function shall be changed for those commands
 * so that nothing happens when they finish. This function is called with
 * hbalock held. The function returns 0 when the command iocb is an abort
 * request.
 **/
int
lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           struct lpfc_iocbq *cmdiocb)
{
        struct lpfc_vport *vport = cmdiocb->vport;
        int retval = IOCB_ERROR;
        IOCB_t *icmd = NULL;

        /*
         * There are certain command types we don't want to abort. And we
         * don't want to abort commands that are already in the process of
         * being aborted.
         */
        icmd = &cmdiocb->iocb;
        if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
            icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
            (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
                return 0;

        /*
         * If we're unloading, don't abort iocb on the ELS ring, but change
         * the callback so that nothing happens when it finishes.
         */
        if ((vport->load_flag & FC_UNLOADING) &&
            (pring->ringno == LPFC_ELS_RING)) {
                if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
                        cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
                else
                        cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
                goto abort_iotag_exit;
        }

        /* Now, we try to issue the abort to the cmdiocb out */
        retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);

abort_iotag_exit:
        /*
         * Caller to this routine should check for IOCB_ERROR
         * and handle it properly. This routine no longer removes
         * iocb off txcmplq and call compl in case of IOCB_ERROR.
         */
        return retval;
}
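
/*
 * Usage sketch (illustrative only, compiled out): callers such as the ELS
 * timeout path hold the hbalock around the lookup of the victim iocb and
 * the abort issue, per the locking rule in the header above. The helper
 * name is hypothetical.
 */
#if 0	/* example only */
static void example_abort_one(struct lpfc_hba *phba,
                              struct lpfc_sli_ring *pring,
                              struct lpfc_iocbq *victim)
{
        spin_lock_irq(&phba->hbalock);
        lpfc_sli_issue_abort_iotag(phba, pring, victim);
        spin_unlock_irq(&phba->hbalock);
}
#endif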

/**
 * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function aborts all iocbs in the given ring and frees all the iocb
 * objects in txq. This function issues abort iocbs unconditionally for all
 * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
 * to complete before the return of this function. The caller is not required
 * to hold any locks.
 **/
void
lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
        LIST_HEAD(completions);
        struct lpfc_iocbq *iocb, *next_iocb;

        if (pring->ringno == LPFC_ELS_RING)
                lpfc_fabric_abort_hba(phba);

        spin_lock_irq(&phba->hbalock);

        /* Take off all the iocbs on txq for cancelling */
        list_splice_init(&pring->txq, &completions);
        pring->txq_cnt = 0;

        /* Next issue ABTS for everything on the txcmplq */
        list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
                lpfc_sli_abort_iotag_issue(phba, pring, iocb);

        spin_unlock_irq(&phba->hbalock);

        /* Cancel all the IOCBs from the completions list */
        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_ABORTED);
}

/**
 * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will abort all pending and outstanding iocbs to an HBA.
 **/
void
lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        int i;

        for (i = 0; i < psli->num_rings; i++) {
                pring = &psli->ring[i];
                lpfc_sli_iocb_ring_abort(phba, pring);
        }
}

/**
 * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
 * @iocbq: Pointer to driver iocb object.
 * @vport: Pointer to driver virtual port object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
 *
 * This function acts as an iocb filter for functions which abort or count
 * all FCP iocbs pending on a lun/SCSI target/SCSI host. It will return
 * 0 if the filtering criteria is met for the given iocb and will return
 * 1 if the filtering criteria is not met.
 * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
 * given iocb is for the SCSI device specified by vport, tgt_id and
 * lun_id parameter.
 * If ctx_cmd == LPFC_CTX_TGT, the function returns 0 only if the
 * given iocb is for the SCSI target specified by vport and tgt_id
 * parameters.
 * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
 * given iocb is for the SCSI host associated with the given vport.
 * This function is called with no locks held.
 **/
static int
lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
                           uint16_t tgt_id, uint64_t lun_id,
                           lpfc_ctx_cmd ctx_cmd)
{
        struct lpfc_scsi_buf *lpfc_cmd;
        int rc = 1;

        if (!(iocbq->iocb_flag & LPFC_IO_FCP))
                return rc;

        if (iocbq->vport != vport)
                return rc;

        lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);

        if (lpfc_cmd->pCmd == NULL)
                return rc;

        switch (ctx_cmd) {
        case LPFC_CTX_LUN:
                if ((lpfc_cmd->rdata->pnode) &&
                    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
                    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
                        rc = 0;
                break;
        case LPFC_CTX_TGT:
                if ((lpfc_cmd->rdata->pnode) &&
                    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
                        rc = 0;
                break;
        case LPFC_CTX_HOST:
                rc = 0;
                break;
        default:
                printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
                        __func__, ctx_cmd);
                break;
        }

        return rc;
}

/**
 * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
 * @vport: Pointer to virtual port.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function returns number of FCP commands pending for the vport.
 * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
 * commands pending on the vport associated with SCSI device specified
 * by tgt_id and lun_id parameters.
 * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
 * commands pending on the vport associated with SCSI target specified
 * by tgt_id parameter.
 * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
 * commands pending on the vport.
 * This function returns the number of iocbs which satisfy the filter.
 * This function is called without any lock held.
 **/
int
lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
                  lpfc_ctx_cmd ctx_cmd)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_iocbq *iocbq;
        int sum, i;

        for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
                iocbq = phba->sli.iocbq_lookup[i];

                if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
                                                ctx_cmd) == 0)
                        sum++;
        }

        return sum;
}
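
/*
 * Usage sketch (illustrative only, compiled out): the SCSI error handlers
 * use this counter to poll for outstanding commands after a reset, e.g.
 * counting what is still pending on one LUN. The helper name is
 * hypothetical.
 */
#if 0	/* example only */
static int example_pending_on_lun(struct lpfc_vport *vport, uint16_t tgt_id,
                                  uint64_t lun_id)
{
        return lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
}
#endif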

/**
 * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
 * @phba: Pointer to HBA context object
 * @cmdiocb: Pointer to command iocb object.
 * @rspiocb: Pointer to response iocb object.
 *
 * This function is called when an aborted FCP iocb completes. This
 * function is called by the ring event handler with no lock held.
 * This function frees the iocb.
 **/
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
                        struct lpfc_iocbq *rspiocb)
{
        lpfc_sli_release_iocbq(phba, cmdiocb);
        return;
}

/**
 * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
 * @vport: Pointer to virtual port.
 * @pring: Pointer to driver SLI ring object.
 * @tgt_id: SCSI ID of the target.
 * @lun_id: LUN ID of the scsi device.
 * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
 *
 * This function sends an abort command for every SCSI command
 * associated with the given virtual port pending on the ring
 * filtered by lpfc_sli_validate_fcp_iocb function.
 * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
 * FCP iocbs associated with lun specified by tgt_id and lun_id
 * parameters.
 * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
 * FCP iocbs associated with SCSI target specified by tgt_id parameter.
 * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
 * FCP iocbs associated with virtual port.
 * This function returns number of iocbs it failed to abort.
 * This function is called with no locks held.
 **/
int
lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
                    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
{
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_iocbq *iocbq;
        struct lpfc_iocbq *abtsiocb;
        IOCB_t *cmd = NULL;
        int errcnt = 0, ret_val = 0;
        int i;

        for (i = 1; i <= phba->sli.last_iotag; i++) {
                iocbq = phba->sli.iocbq_lookup[i];

                if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
                                               abort_cmd) != 0)
                        continue;

                /* issue ABTS for this IOCB based on iotag */
                abtsiocb = lpfc_sli_get_iocbq(phba);
                if (abtsiocb == NULL) {
                        errcnt++;
                        continue;
                }

                cmd = &iocbq->iocb;
                abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
                abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
                if (phba->sli_rev == LPFC_SLI_REV4)
                        abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
                else
                        abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
                abtsiocb->iocb.ulpLe = 1;
                abtsiocb->iocb.ulpClass = cmd->ulpClass;
                abtsiocb->vport = phba->pport;

                /* ABTS WQE must go to the same WQ as the WQE to be aborted */
                abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
                if (iocbq->iocb_flag & LPFC_IO_FCP)
                        abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;

                if (lpfc_is_link_up(phba))
                        abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
                else
                        abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

                /* Setup callback routine and issue the command. */
                abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
                ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
                                              abtsiocb, 0);
                if (ret_val == IOCB_ERROR) {
                        lpfc_sli_release_iocbq(phba, abtsiocb);
                        errcnt++;
                        continue;
                }
        }

        return errcnt;
}
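
/*
 * Usage sketch (illustrative only, compiled out): a target reset handler
 * might abort everything queued to one SCSI target on the FCP ring and
 * then poll lpfc_sli_sum_iocb() until the count drains. The helper name
 * is hypothetical; lun_id is ignored for LPFC_CTX_TGT.
 */
#if 0	/* example only */
static int example_abort_target(struct lpfc_vport *vport, uint16_t tgt_id)
{
        struct lpfc_hba *phba = vport->phba;

        /* Returns the number of iocbs it failed to abort */
        return lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
                                   tgt_id, 0, LPFC_CTX_TGT);
}
#endif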

/**
 * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_sli_issue_iocb_wait function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from other thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
                        struct lpfc_iocbq *cmdiocbq,
                        struct lpfc_iocbq *rspiocbq)
{
        wait_queue_head_t *pdone_q;
        unsigned long iflags;
        struct lpfc_scsi_buf *lpfc_cmd;

        spin_lock_irqsave(&phba->hbalock, iflags);
        cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
        if (cmdiocbq->context2 && rspiocbq)
                memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
                       &rspiocbq->iocb, sizeof(IOCB_t));

        /* Set the exchange busy flag for task management commands */
        if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
                !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
                lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
                        cur_iocbq);
                lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
        }

        pdone_q = cmdiocbq->context_un.wait_queue;
        if (pdone_q)
                wake_up(pdone_q);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return;
}

/**
 * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to command iocb.
 * @flag: Flag to test.
 *
 * This routine grabs the hbalock and then tests the iocb_flag to
 * see if the passed in flag is set.
 * Returns:
 * 1 if flag is set.
 * 0 if flag is not set.
 **/
static int
lpfc_chk_iocb_flg(struct lpfc_hba *phba,
                 struct lpfc_iocbq *piocbq, uint32_t flag)
{
        unsigned long iflags;
        int ret;

        spin_lock_irqsave(&phba->hbalock, iflags);
        ret = piocbq->iocb_flag & flag;
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        return ret;
}

/**
 * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
 * @ring_number: Ring number the iocb is issued on.
 * @piocb: Pointer to command iocb.
 * @prspiocbq: Pointer to response iocb.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the iocb to firmware and waits for the
 * iocb to complete. If the iocb command is not
 * completed within timeout seconds, it returns IOCB_TIMEDOUT.
 * Caller should not free the iocb resources if this function
 * returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for iocb completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the iocb completions occur while
 * this function sleeps. So, this function cannot be called from
 * the thread which processes iocb completion for this ring.
 * This function clears the iocb_flag of the iocb object before
 * issuing the iocb and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS when success.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
                         uint32_t ring_number,
                         struct lpfc_iocbq *piocb,
                         struct lpfc_iocbq *prspiocbq,
                         uint32_t timeout)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
        long timeleft, timeout_req = 0;
        int retval = IOCB_SUCCESS;
        uint32_t creg_val;
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
        /*
         * If the caller has provided a response iocbq buffer, then context2
         * is NULL or its an error.
         */
        if (prspiocbq) {
                if (piocb->context2)
                        return IOCB_ERROR;
                piocb->context2 = prspiocbq;
        }

        piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
        piocb->context_un.wait_queue = &done_q;
        piocb->iocb_flag &= ~LPFC_IO_WAKE;

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                if (lpfc_readl(phba->HCregaddr, &creg_val))
                        return IOCB_ERROR;
                creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
                                     SLI_IOCB_RET_IOCB);
        if (retval == IOCB_SUCCESS) {
                timeout_req = timeout * HZ;
                timeleft = wait_event_timeout(done_q,
                                lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
                                timeout_req);

                if (piocb->iocb_flag & LPFC_IO_WAKE) {
                        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                        "0331 IOCB wake signaled\n");
                } else if (timeleft == 0) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0338 IOCB wait timeout error - no "
                                        "wake response Data x%x\n", timeout);
                        retval = IOCB_TIMEDOUT;
                } else {
                        lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                        "0330 IOCB wake NOT set, "
                                        "Data x%x x%lx\n",
                                        timeout, (timeleft / jiffies));
                        retval = IOCB_TIMEDOUT;
                }
        } else if (retval == IOCB_BUSY) {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
                        phba->iocb_cnt, pring->txq_cnt, pring->txcmplq_cnt);
                return retval;
        } else {
                lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                                "0332 IOCB wait issue failed, Data x%x\n",
                                retval);
                retval = IOCB_ERROR;
        }

        if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
                if (lpfc_readl(phba->HCregaddr, &creg_val))
                        return IOCB_ERROR;
                creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
                writel(creg_val, phba->HCregaddr);
                readl(phba->HCregaddr); /* flush */
        }

        if (prspiocbq)
                piocb->context2 = NULL;

        piocb->context_un.wait_queue = NULL;
        piocb->iocb_cmpl = NULL;
        return retval;
}
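
/*
 * Usage sketch (illustrative only, compiled out): a synchronous ELS-ring
 * caller allocates a second iocbq for the response and checks for
 * IOCB_TIMEDOUT, in which case the iocbs must not be freed because the
 * completion may still fire. The 30 second timeout and helper name are
 * hypothetical.
 */
#if 0	/* example only */
static int example_issue_sync(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
{
        struct lpfc_iocbq *rspiocbq = lpfc_sli_get_iocbq(phba);
        int rc;

        if (!rspiocbq)
                return IOCB_ERROR;
        /* piocb->context2 must be NULL on entry when passing a response */
        rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, piocb, rspiocbq, 30);
        if (rc != IOCB_TIMEDOUT)
                lpfc_sli_release_iocbq(phba, rspiocbq);
        return rc;
}
#endif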

/**
 * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to driver mailbox object.
 * @timeout: Timeout in number of seconds.
 *
 * This function issues the mailbox to firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using an
 * interruptible wait. If the thread is woken up due to a
 * signal, MBX_TIMEOUT error is returned to the caller. Caller
 * should not free the mailbox resources, if this function returns
 * MBX_TIMEOUT.
 * This function will sleep while waiting for mailbox completion.
 * So, this function should not be called from any context which
 * does not allow sleeping. Due to the same reason, this function
 * cannot be called with interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps. So, this function cannot be called from
 * the worker thread which processes mailbox completion.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * This function is called with no lock held.
 **/
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
                         uint32_t timeout)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
        int retval;
        unsigned long flag;

        /* The caller must leave context1 empty. */
        if (pmboxq->context1)
                return MBX_NOT_FINISHED;

        pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
        /* setup wake call as IOCB callback */
        pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
        /* setup context field to pass wait_queue pointer to wake function */
        pmboxq->context1 = &done_q;

        /* now issue the command */
        retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

        if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
                wait_event_interruptible_timeout(done_q,
                                pmboxq->mbox_flag & LPFC_MBX_WAKE,
                                timeout * HZ);

                spin_lock_irqsave(&phba->hbalock, flag);
                pmboxq->context1 = NULL;
                /*
                 * if LPFC_MBX_WAKE flag is set the mailbox is completed
                 * else do not free the resources.
                 */
                if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
                        retval = MBX_SUCCESS;
                        lpfc_sli4_swap_str(phba, pmboxq);
                } else {
                        retval = MBX_TIMEOUT;
                        pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                }
                spin_unlock_irqrestore(&phba->hbalock, flag);
        }

        return retval;
}
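
/*
 * Usage sketch (illustrative only, compiled out): management paths wrap a
 * prepared mailbox in this synchronous helper; on MBX_TIMEOUT the mailbox
 * is left to the default completion handler installed above rather than
 * freed. The 60 second timeout and helper name are hypothetical.
 */
#if 0	/* example only */
static int example_issue_mbox_sync(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
        int rc;

        rc = lpfc_sli_issue_mbox_wait(phba, mbox, 60);
        if (rc != MBX_TIMEOUT)
                mempool_free(mbox, phba->mbox_mem_pool);
        return rc;
}
#endif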

/**
 * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
 * @phba: Pointer to HBA context.
 *
 * This function is called to shutdown the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as in a blocked state to prevent
 * asynchronous mailbox commands from being issued off the pending mailbox
 * command queue. If the mailbox command sub-system shutdown is due to
 * HBA error conditions such as EEH or ERATT, this routine shall invoke
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to normal condition (such
 * as with offline or HBA function reset), this routine will wait for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down mailbox sub-system.
 **/
void
lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
{
        struct lpfc_sli *psli = &phba->sli;
        uint8_t actcmd = MBX_HEARTBEAT;
        unsigned long timeout;

        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
        spin_unlock_irq(&phba->hbalock);

        if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                spin_lock_irq(&phba->hbalock);
                if (phba->sli.mbox_active)
                        actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
                spin_unlock_irq(&phba->hbalock);
                /* Determine how long we might wait for the active mailbox
                 * command to be gracefully completed by firmware.
                 */
                timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
                                           1000) + jiffies;
                while (phba->sli.mbox_active) {
                        /* Check active mailbox complete status every 2ms */
                        msleep(2);
                        if (time_after(jiffies, timeout))
                                /* Timeout, let the mailbox flush routine to
                                 * forcefully release active mailbox command
                                 */
                                break;
                }
        }
        lpfc_sli_mbox_sys_flush(phba);
}

/**
 * lpfc_sli_eratt_read - read sli-3 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli_eratt_read(struct lpfc_hba *phba)
{
        uint32_t ha_copy;

        /* Read chip Host Attention (HA) register */
        if (lpfc_readl(phba->HAregaddr, &ha_copy))
                goto unplug_err;

        if (ha_copy & HA_ERATT) {
                /* Read host status register to retrieve error event */
                if (lpfc_sli_read_hs(phba))
                        goto unplug_err;

                /* Check if a deferred error condition is active */
                if ((HS_FFER1 & phba->work_hs) &&
                    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
                      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
                        phba->hba_flag |= DEFER_ERATT;
                        /* Clear all interrupt enable conditions */
                        writel(0, phba->HCregaddr);
                        readl(phba->HCregaddr);
                }

                /* Set the driver HA work bitmap */
                phba->work_ha |= HA_ERATT;
                /* Indicate polling handles this ERATT */
                phba->hba_flag |= HBA_ERATT_HANDLED;
                return 1;
        }
        return 0;

unplug_err:
        /* Set the driver HS work bitmap */
        phba->work_hs |= UNPLUG_ERR;
        /* Set the driver HA work bitmap */
        phba->work_ha |= HA_ERATT;
        /* Indicate polling handles this ERATT */
        phba->hba_flag |= HBA_ERATT_HANDLED;
        return 1;
}

/**
 * lpfc_sli4_eratt_read - read sli-4 error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hostlock
 * with spin_lock_irq().
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
static int
lpfc_sli4_eratt_read(struct lpfc_hba *phba)
{
        uint32_t uerr_sta_hi, uerr_sta_lo;
        uint32_t if_type, portsmphr;
        struct lpfc_register portstat_reg;

        /*
         * For now, use the SLI4 device internal unrecoverable error
         * registers for error attention. This can be changed later.
         */
        if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
        switch (if_type) {
        case LPFC_SLI_INTF_IF_TYPE_0:
                if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
                        &uerr_sta_lo) ||
                        lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
                        &uerr_sta_hi)) {
                        phba->work_hs |= UNPLUG_ERR;
                        phba->work_ha |= HA_ERATT;
                        phba->hba_flag |= HBA_ERATT_HANDLED;
                        return 1;
                }
                if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
                    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "1423 HBA Unrecoverable error: "
                                        "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
                                        "ue_mask_lo_reg=0x%x, "
                                        "ue_mask_hi_reg=0x%x\n",
                                        uerr_sta_lo, uerr_sta_hi,
                                        phba->sli4_hba.ue_mask_lo,
                                        phba->sli4_hba.ue_mask_hi);
                        phba->work_status[0] = uerr_sta_lo;
                        phba->work_status[1] = uerr_sta_hi;
                        phba->work_ha |= HA_ERATT;
                        phba->hba_flag |= HBA_ERATT_HANDLED;
                        return 1;
                }
                break;
        case LPFC_SLI_INTF_IF_TYPE_2:
                if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
                        &portstat_reg.word0) ||
                        lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
                        &portsmphr)) {
                        phba->work_hs |= UNPLUG_ERR;
                        phba->work_ha |= HA_ERATT;
                        phba->hba_flag |= HBA_ERATT_HANDLED;
                        return 1;
                }
                if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
                        phba->work_status[0] =
                                readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
                        phba->work_status[1] =
                                readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                        "2885 Port Error Detected: "
                                        "port status reg 0x%x, "
                                        "port smphr reg 0x%x, "
                                        "error 1=0x%x, error 2=0x%x\n",
                                        portstat_reg.word0,
                                        portsmphr,
                                        phba->work_status[0],
                                        phba->work_status[1]);
                        phba->work_ha |= HA_ERATT;
                        phba->hba_flag |= HBA_ERATT_HANDLED;
                        return 1;
                }
                break;
        case LPFC_SLI_INTF_IF_TYPE_1:
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "2886 HBA Error Attention on unsupported "
                                "if type %d.", if_type);
                return 1;
        }

        return 0;
}

/**
 * lpfc_sli_check_eratt - check error attention events
 * @phba: Pointer to HBA context.
 *
 * This function is called from timer soft interrupt context to check HBA's
 * error attention register bit for error attention events.
 *
 * This function returns 1 when there is Error Attention in the Host Attention
 * Register and returns 0 otherwise.
 **/
uint32_t
lpfc_sli_check_eratt(struct lpfc_hba *phba)
{
        uint32_t ha_copy;

        /* If somebody is waiting to handle an eratt, don't process it
         * here. The brdkill function will do this.
         */
        if (phba->link_flag & LS_IGNORE_ERATT)
                return 0;

        /* Check if interrupt handler handles this ERATT */
        spin_lock_irq(&phba->hbalock);
        if (phba->hba_flag & HBA_ERATT_HANDLED) {
                /* Interrupt handler has handled ERATT */
                spin_unlock_irq(&phba->hbalock);
                return 0;
        }

        /*
         * If there is deferred error attention, do not check for error
         * attention
         */
        if (unlikely(phba->hba_flag & DEFER_ERATT)) {
                spin_unlock_irq(&phba->hbalock);
                return 0;
        }

        /* If PCI channel is offline, don't process it */
        if (unlikely(pci_channel_offline(phba->pcidev))) {
                spin_unlock_irq(&phba->hbalock);
                return 0;
        }

        switch (phba->sli_rev) {
        case LPFC_SLI_REV2:
        case LPFC_SLI_REV3:
                /* Read chip Host Attention (HA) register */
                ha_copy = lpfc_sli_eratt_read(phba);
                break;
        case LPFC_SLI_REV4:
                /* Read device Unrecoverable Error (UERR) registers */
                ha_copy = lpfc_sli4_eratt_read(phba);
                break;
        default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0299 Invalid SLI revision (%d)\n",
                                phba->sli_rev);
                ha_copy = 0;
                break;
        }
        spin_unlock_irq(&phba->hbalock);

        return ha_copy;
}
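
/*
 * Usage sketch (illustrative only, compiled out): the driver's heartbeat
 * timer path calls this poller; a non-zero return means an ERATT was
 * latched into phba->work_ha, so the worker thread should be kicked. The
 * helper name is hypothetical.
 */
#if 0	/* example only */
static void example_poll_eratt(struct lpfc_hba *phba)
{
        if (lpfc_sli_check_eratt(phba))
                lpfc_worker_wake_up(phba);
}
#endif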

/**
 * lpfc_intr_state_check - Check device state for interrupt handling
 * @phba: Pointer to HBA context.
 *
 * This inline routine checks whether a device or its PCI slot is in a state
 * that the interrupt should be handled.
 *
 * This function returns 0 if the device or the PCI slot is in a state that
 * interrupt should be handled, otherwise -EIO.
 */
static inline int
lpfc_intr_state_check(struct lpfc_hba *phba)
{
        /* If the pci channel is offline, ignore all the interrupts */
        if (unlikely(pci_channel_offline(phba->pcidev)))
                return -EIO;

        /* Update device level interrupt statistics */
        phba->sli.slistat.sli_intr++;

        /* Ignore all interrupts during initialization. */
        if (unlikely(phba->link_state < LPFC_LINK_DOWN))
                return -EIO;

        return 0;
}

/**
 * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA
 * is undergoing initialization, the interrupt handler will not process
 * the interrupt. The link attention and ELS ring attention events are
 * handled by the worker thread. The interrupt handler signals the worker
 * thread and returns for these events. This function is called without
 * any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_sp_intr_handler(int irq, void *dev_id)
{
        struct lpfc_hba *phba;
        uint32_t ha_copy, hc_copy;
        uint32_t work_ha_copy;
        unsigned long status;
        unsigned long iflag;
        uint32_t control;

        MAILBOX_t *mbox, *pmbox;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        struct lpfc_dmabuf *mp;
        LPFC_MBOXQ_t *pmb;
        int rc;

        /*
         * Get the driver's phba structure from the dev_id and
         * assume the HBA is not interrupting.
         */
        phba = (struct lpfc_hba *)dev_id;

        if (unlikely(!phba))
                return IRQ_NONE;

        /*
         * Stuff needs to be attended to when this function is invoked as an
         * individual interrupt handler in MSI-X multi-message interrupt mode
         */
        if (phba->intr_type == MSIX) {
                /* Check device state for handling interrupt */
                if (lpfc_intr_state_check(phba))
                        return IRQ_NONE;
                /* Need to read HA REG for slow-path events */
                spin_lock_irqsave(&phba->hbalock, iflag);
                if (lpfc_readl(phba->HAregaddr, &ha_copy))
                        goto unplug_error;
                /* If somebody is waiting to handle an eratt don't process it
                 * here. The brdkill function will do this.
                 */
                if (phba->link_flag & LS_IGNORE_ERATT)
                        ha_copy &= ~HA_ERATT;
                /* Check the need for handling ERATT in interrupt handler */
                if (ha_copy & HA_ERATT) {
                        if (phba->hba_flag & HBA_ERATT_HANDLED)
                                /* ERATT polling has handled ERATT */
                                ha_copy &= ~HA_ERATT;
                        else
                                /* Indicate interrupt handler handles ERATT */
                                phba->hba_flag |= HBA_ERATT_HANDLED;
                }

                /*
                 * If there is deferred error attention, do not check for any
                 * interrupt.
                 */
                if (unlikely(phba->hba_flag & DEFER_ERATT)) {
                        spin_unlock_irqrestore(&phba->hbalock, iflag);
                        return IRQ_NONE;
                }

                /* Clear up only attention source related to slow-path */
                if (lpfc_readl(phba->HCregaddr, &hc_copy))
                        goto unplug_error;

                writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
                        HC_LAINT_ENA | HC_ERINT_ENA),
                        phba->HCregaddr);
                writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
                        phba->HAregaddr);
                writel(hc_copy, phba->HCregaddr);
                readl(phba->HAregaddr); /* flush */
                spin_unlock_irqrestore(&phba->hbalock, iflag);
        } else
                ha_copy = phba->ha_copy;

        work_ha_copy = ha_copy & phba->work_ha_mask;

        if (work_ha_copy) {
                if (work_ha_copy & HA_LATT) {
                        if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
                                /*
                                 * Turn off Link Attention interrupts
                                 * until CLEAR_LA done
                                 */
                                spin_lock_irqsave(&phba->hbalock, iflag);
                                phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
                                if (lpfc_readl(phba->HCregaddr, &control))
                                        goto unplug_error;
                                control &= ~HC_LAINT_ENA;
                                writel(control, phba->HCregaddr);
                                readl(phba->HCregaddr); /* flush */
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                        } else
                                work_ha_copy &= ~HA_LATT;
                }

                if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
                        /*
                         * Turn off Slow Rings interrupts, LPFC_ELS_RING is
                         * the only slow ring.
                         */
                        status = (work_ha_copy &
                                (HA_RXMASK << (4*LPFC_ELS_RING)));
                        status >>= (4*LPFC_ELS_RING);
                        if (status & HA_RXMASK) {
                                spin_lock_irqsave(&phba->hbalock, iflag);
                                if (lpfc_readl(phba->HCregaddr, &control))
                                        goto unplug_error;

                                lpfc_debugfs_slow_ring_trc(phba,
                                "ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
                                control, status,
                                (uint32_t)phba->sli.slistat.sli_intr);

                                if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
                                        lpfc_debugfs_slow_ring_trc(phba,
                                                "ISR Disable ring:"
                                                "pwork:x%x hawork:x%x wait:x%x",
                                                phba->work_ha, work_ha_copy,
                                                (uint32_t)((unsigned long)
                                                &phba->work_waitq));

                                        control &=
                                            ~(HC_R0INT_ENA << LPFC_ELS_RING);
                                        writel(control, phba->HCregaddr);
                                        readl(phba->HCregaddr); /* flush */
                                } else {
                                        lpfc_debugfs_slow_ring_trc(phba,
                                                "ISR slow ring:   pwork:"
                                                "x%x hawork:x%x wait:x%x",
                                                phba->work_ha, work_ha_copy,
                                                (uint32_t)((unsigned long)
                                                &phba->work_waitq));
                                }
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                        }
                }
                spin_lock_irqsave(&phba->hbalock, iflag);
                if (work_ha_copy & HA_ERATT) {
                        if (lpfc_sli_read_hs(phba))
                                goto unplug_error;
                        /*
                         * Check if a deferred error condition
                         * is active
                         */
                        if ((HS_FFER1 & phba->work_hs) &&
                                ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
                                  HS_FFER6 | HS_FFER7 | HS_FFER8) &
                                  phba->work_hs)) {
                                phba->hba_flag |= DEFER_ERATT;
                                /* Clear all interrupt enable conditions */
                                writel(0, phba->HCregaddr);
                                readl(phba->HCregaddr);
                        }
                }

                if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
                        pmb = phba->sli.mbox_active;
                        pmbox = &pmb->u.mb;
                        mbox = phba->mbox;
                        vport = pmb->vport;

                        /* First check out the status word */
                        lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
                        if (pmbox->mbxOwner != OWN_HOST) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                /*
                                 * Stray Mailbox Interrupt, mbxCommand <cmd>
                                 * mbxStatus <status>
                                 */
                                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
                                                LOG_SLI,
                                                "(%d):0304 Stray Mailbox "
                                                "Interrupt mbxCommand x%x "
                                                "mbxStatus x%x\n",
                                                (vport ? vport->vpi : 0),
                                                pmbox->mbxCommand,
                                                pmbox->mbxStatus);
                                /* clear mailbox attention bit */
                                work_ha_copy &= ~HA_MBATT;
                        } else {
                                phba->sli.mbox_active = NULL;
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
                                phba->last_completion_time = jiffies;
                                del_timer(&phba->sli.mbox_tmo);
                                if (pmb->mbox_cmpl) {
                                        lpfc_sli_pcimem_bcopy(mbox, pmbox,
                                                        MAILBOX_CMD_SIZE);
                                        if (pmb->out_ext_byte_len &&
                                                pmb->context2)
                                                lpfc_sli_pcimem_bcopy(
                                                phba->mbox_ext,
                                                pmb->context2,
                                                pmb->out_ext_byte_len);
                                }
                                if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
                                        pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;

                                        lpfc_debugfs_disc_trc(vport,
                                                LPFC_DISC_TRC_MBOX_VPORT,
                                                "MBOX dflt rpi: : "
                                                "status:x%x rpi:x%x",
                                                (uint32_t)pmbox->mbxStatus,
                                                pmbox->un.varWords[0], 0);

                                        if (!pmbox->mbxStatus) {
                                                mp = (struct lpfc_dmabuf *)
                                                        (pmb->context1);
                                                ndlp = (struct lpfc_nodelist *)
                                                        pmb->context2;

                                                /* Reg_LOGIN of dflt RPI was
                                                 * successful. Now let's get
                                                 * rid of the RPI using the
                                                 * same mbox buffer.
                                                 */
                                                lpfc_unreg_login(phba,
                                                        vport->vpi,
                                                        pmbox->un.varWords[0],
                                                        pmb);
                                                pmb->mbox_cmpl =
                                                        lpfc_mbx_cmpl_dflt_rpi;
                                                pmb->context1 = mp;
                                                pmb->context2 = ndlp;
                                                pmb->vport = vport;
                                                rc = lpfc_sli_issue_mbox(phba,
                                                                pmb,
                                                                MBX_NOWAIT);
                                                if (rc != MBX_BUSY)
                                                        lpfc_printf_log(phba,
                                                        KERN_ERR,
                                                        LOG_MBOX | LOG_SLI,
                                                        "0350 rc should have"
                                                        "been MBX_BUSY\n");
                                                if (rc != MBX_NOT_FINISHED)
                                                        goto send_current_mbox;
                                        }
                                }
                                spin_lock_irqsave(
                                                &phba->pport->work_port_lock,
                                                iflag);
                                phba->pport->work_port_events &=
                                        ~WORKER_MBOX_TMO;
                                spin_unlock_irqrestore(
                                                &phba->pport->work_port_lock,
                                                iflag);
                                lpfc_mbox_cmpl_put(phba, pmb);
                        }
                } else
                        spin_unlock_irqrestore(&phba->hbalock, iflag);

                if ((work_ha_copy & HA_MBATT) &&
                    (phba->sli.mbox_active == NULL)) {
send_current_mbox:
                        /* Process next mailbox command if there is one */
                        do {
                                rc = lpfc_sli_issue_mbox(phba, NULL,
                                                         MBX_NOWAIT);
                        } while (rc == MBX_NOT_FINISHED);
                        if (rc != MBX_SUCCESS)
                                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
                                                LOG_SLI, "0349 rc should be "
                                                "MBX_SUCCESS\n");
                }

                spin_lock_irqsave(&phba->hbalock, iflag);
                phba->work_ha |= work_ha_copy;
                spin_unlock_irqrestore(&phba->hbalock, iflag);
                lpfc_worker_wake_up(phba);
        }
        return IRQ_HANDLED;
unplug_error:
        spin_unlock_irqrestore(&phba->hbalock, iflag);
        return IRQ_HANDLED;

} /* lpfc_sli_sp_intr_handler */

/**
 * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when device with SLI-3 interface spec is enabled with
 * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * the interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_fp_intr_handler(int irq, void *dev_id)
{
        struct lpfc_hba *phba;
        uint32_t ha_copy;
        unsigned long status;
        unsigned long iflag;

        /* Get the driver's phba structure from the dev_id and
         * assume the HBA is not interrupting.
         */
        phba = (struct lpfc_hba *) dev_id;

        if (unlikely(!phba))
                return IRQ_NONE;

        /*
         * Stuff needs to be attended to when this function is invoked as an
         * individual interrupt handler in MSI-X multi-message interrupt mode
         */
        if (phba->intr_type == MSIX) {
                /* Check device state for handling interrupt */
                if (lpfc_intr_state_check(phba))
                        return IRQ_NONE;
                /* Need to read HA REG for FCP ring and other ring events */
                if (lpfc_readl(phba->HAregaddr, &ha_copy))
                        return IRQ_HANDLED;
                /* Clear up only attention source related to fast-path */
                spin_lock_irqsave(&phba->hbalock, iflag);
                /*
                 * If there is deferred error attention, do not check for
                 * any interrupt.
                 */
                if (unlikely(phba->hba_flag & DEFER_ERATT)) {
                        spin_unlock_irqrestore(&phba->hbalock, iflag);
                        return IRQ_NONE;
                }
                writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
                        phba->HAregaddr);
                readl(phba->HAregaddr); /* flush */
                spin_unlock_irqrestore(&phba->hbalock, iflag);
        } else
                ha_copy = phba->ha_copy;

        /*
         * Process all events on FCP ring. Take the optimized path for FCP IO.
         */
        ha_copy &= ~(phba->work_ha_mask);

        status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
        status >>= (4*LPFC_FCP_RING);
        if (status & HA_RXMASK)
                lpfc_sli_handle_fast_ring_event(phba,
                                                &phba->sli.ring[LPFC_FCP_RING],
                                                status);

        if (phba->cfg_multi_ring_support == 2) {
                /*
                 * Process all events on extra ring. Take the optimized path
                 * for extra ring IO.
                 */
                status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
                status >>= (4*LPFC_EXTRA_RING);
                if (status & HA_RXMASK) {
                        lpfc_sli_handle_fast_ring_event(phba,
                                        &phba->sli.ring[LPFC_EXTRA_RING],
                                        status);
                }
        }
        return IRQ_HANDLED;
} /* lpfc_sli_fp_intr_handler */

/**
 * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the HBA device-level interrupt handler to device with
 * SLI-3 interface spec, called from the PCI layer when either MSI or
 * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
 * requires driver attention. This function invokes the slow-path interrupt
 * attention handling function and fast-path interrupt attention handling
 * function in turn to process the relevant HBA attention events. This
 * function is called without any lock held. It gets the hbalock to access
 * and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli_intr_handler(int irq, void *dev_id)
{
        struct lpfc_hba *phba;
        irqreturn_t sp_irq_rc, fp_irq_rc;
        unsigned long status1, status2;
        uint32_t hc_copy;

        /*
         * Get the driver's phba structure from the dev_id and
         * assume the HBA is not interrupting.
         */
        phba = (struct lpfc_hba *) dev_id;

        if (unlikely(!phba))
                return IRQ_NONE;

        /* Check device state for handling interrupt */
        if (lpfc_intr_state_check(phba))
                return IRQ_NONE;

        spin_lock(&phba->hbalock);
        if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
                spin_unlock(&phba->hbalock);
                return IRQ_HANDLED;
        }

        if (unlikely(!phba->ha_copy)) {
                spin_unlock(&phba->hbalock);
                return IRQ_NONE;
        } else if (phba->ha_copy & HA_ERATT) {
                if (phba->hba_flag & HBA_ERATT_HANDLED)
                        /* ERATT polling has handled ERATT */
                        phba->ha_copy &= ~HA_ERATT;
                else
                        /* Indicate interrupt handler handles ERATT */
                        phba->hba_flag |= HBA_ERATT_HANDLED;
        }

        /*
         * If there is deferred error attention, do not check for any
         * interrupt.
         */
        if (unlikely(phba->hba_flag & DEFER_ERATT)) {
                spin_unlock(&phba->hbalock);
                return IRQ_NONE;
        }

        /* Clear attention sources except link and error attentions */
        if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
                spin_unlock(&phba->hbalock);
                return IRQ_HANDLED;
        }
        writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
                | HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
                phba->HCregaddr);
        writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
        writel(hc_copy, phba->HCregaddr);
        readl(phba->HAregaddr); /* flush */
        spin_unlock(&phba->hbalock);

        /*
         * Invokes slow-path host attention interrupt handling as appropriate.
         */

        /* status of events with mailbox and link attention */
        status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);

        /* status of events with ELS ring */
        status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
        status2 >>= (4*LPFC_ELS_RING);

        if (status1 || (status2 & HA_RXMASK))
                sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
        else
                sp_irq_rc = IRQ_NONE;

        /*
         * Invoke fast-path host attention interrupt handling as appropriate.
         */

        /* status of events with FCP ring */
        status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
        status1 >>= (4*LPFC_FCP_RING);

        /* status of events with extra ring */
        if (phba->cfg_multi_ring_support == 2) {
                status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
                status2 >>= (4*LPFC_EXTRA_RING);
        } else
                status2 = 0;

        if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
                fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
        else
                fp_irq_rc = IRQ_NONE;

        /* Return device-level interrupt handling status */
        return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
} /* lpfc_sli_intr_handler */
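
/*
 * Usage sketch (illustrative only, compiled out): in INTx or single-MSI
 * mode the driver registers this device-level handler directly with the
 * PCI layer; in MSI-X mode the sp/fp handlers above are registered on
 * their own vectors instead. The helper name is hypothetical.
 */
#if 0	/* example only */
static int example_request_intx(struct lpfc_hba *phba)
{
        return request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
                           IRQF_SHARED, LPFC_DRIVER_NAME, phba);
}
#endif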

/**
 * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 FCP abort XRI events.
 **/
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
{
        struct lpfc_cq_event *cq_event;

        /* First, declare the fcp xri abort event has been handled */
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
        spin_unlock_irq(&phba->hbalock);
        /* Now, handle all the fcp xri abort events */
        while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
                /* Get the first event from the head of the event queue */
                spin_lock_irq(&phba->hbalock);
                list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
                                 cq_event, struct lpfc_cq_event, list);
                spin_unlock_irq(&phba->hbalock);
                /* Notify aborted XRI for FCP work queue */
                lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
                /* Free the event processed back to the free pool */
                lpfc_sli4_cq_event_release(phba, cq_event);
        }
}

/**
 * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els abort xri events.
 **/
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
{
        struct lpfc_cq_event *cq_event;

        /* First, declare the els xri abort event has been handled */
        spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
        spin_unlock_irq(&phba->hbalock);
        /* Now, handle all the els xri abort events */
        while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
                /* Get the first event from the head of the event queue */
                spin_lock_irq(&phba->hbalock);
                list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
                                 cq_event, struct lpfc_cq_event, list);
                spin_unlock_irq(&phba->hbalock);
                /* Notify aborted XRI for ELS work queue */
                lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
                /* Free the event processed back to the free pool */
                lpfc_sli4_cq_event_release(phba, cq_event);
        }
}

/**
 * lpfc_sli4_iocb_param_transfer - Transfer pIocbOut and cmpl status to pIocbIn
 * @phba: pointer to lpfc hba data structure
 * @pIocbIn: pointer to the rspiocbq
 * @pIocbOut: pointer to the cmdiocbq
 * @wcqe: pointer to the complete wcqe
 *
 * This routine transfers the fields of a command iocbq to a response iocbq
 * by copying all the IOCB fields from command iocbq and transferring the
 * completion status information from the complete wcqe.
 **/
static void
lpfc_sli4_iocb_param_transfer(struct lpfc_hba *phba,
                              struct lpfc_iocbq *pIocbIn,
                              struct lpfc_iocbq *pIocbOut,
                              struct lpfc_wcqe_complete *wcqe)
{
        unsigned long iflags;
        size_t offset = offsetof(struct lpfc_iocbq, iocb);

        memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
               sizeof(struct lpfc_iocbq) - offset);
        /* Map WCQE parameters into irspiocb parameters */
        pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
        if (pIocbOut->iocb_flag & LPFC_IO_FCP)
                if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
                        pIocbIn->iocb.un.fcpi.fcpi_parm =
                                        pIocbOut->iocb.un.fcpi.fcpi_parm -
                                        wcqe->total_data_placed;
                else
                        pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
        else {
                pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
                pIocbIn->iocb.un.genreq64.bdl.bdeSize = wcqe->total_data_placed;
        }

        /* Pick up HBA exchange busy condition */
        if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
                spin_lock_irqsave(&phba->hbalock, iflags);
                pIocbIn->iocb_flag |= LPFC_EXCHANGE_BUSY;
                spin_unlock_irqrestore(&phba->hbalock, iflags);
        }
}

/**
 * lpfc_sli4_els_wcqe_to_rspiocbq - Get response iocbq from els wcqe
 * @phba: Pointer to HBA context object.
 * @irspiocbq: Pointer to the response iocbq carrying the ELS WCQE.
 *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
 * discovery engine to handle.
 *
 * Return: Pointer to the receive IOCBQ, NULL otherwise.
 **/
static struct lpfc_iocbq *
lpfc_sli4_els_wcqe_to_rspiocbq(struct lpfc_hba *phba,
                               struct lpfc_iocbq *irspiocbq)
{
        struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
        struct lpfc_iocbq *cmdiocbq;
        struct lpfc_wcqe_complete *wcqe;
        unsigned long iflags;

        wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
        spin_lock_irqsave(&phba->hbalock, iflags);
        pring->stats.iocb_event++;
        /* Look up the ELS command IOCB and create pseudo response IOCB */
        cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        if (unlikely(!cmdiocbq)) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "0386 ELS complete with no corresponding "
                                "cmdiocb: iotag (%d)\n",
                                bf_get(lpfc_wcqe_c_request_tag, wcqe));
                lpfc_sli_release_iocbq(phba, irspiocbq);
                return NULL;
        }

        /* Fake the irspiocbq and copy necessary response information */
        lpfc_sli4_iocb_param_transfer(phba, irspiocbq, cmdiocbq, wcqe);

        return irspiocbq;
}

/**
 * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with an
 * asynchronous event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
        struct lpfc_cq_event *cq_event;
        unsigned long iflags;

        lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                        "0392 Async Event: word0:x%x, word1:x%x, "
                        "word2:x%x, word3:x%x\n", mcqe->word0,
                        mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);

        /* Allocate a new internal CQ_EVENT entry */
        cq_event = lpfc_sli4_cq_event_alloc(phba);
        if (!cq_event) {
                lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
                                "0394 Failed to allocate CQ_EVENT entry\n");
                return false;
        }

        /* Move the CQE into an asynchronous event entry */
        memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
        spin_lock_irqsave(&phba->hbalock, iflags);
        list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
        /* Set the async event flag */
        phba->hba_flag |= ASYNC_EVENT;
        spin_unlock_irqrestore(&phba->hbalock, iflags);

        return true;
}

/**
 * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
 * @phba: Pointer to HBA context object.
 * @mcqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry with a mailbox
 * completed event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
{
        uint32_t mcqe_status;
        MAILBOX_t *mbox, *pmbox;
        struct lpfc_mqe *mqe;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        struct lpfc_dmabuf *mp;
        unsigned long iflags;
        LPFC_MBOXQ_t *pmb;
        bool workposted = false;
        int rc;

        /* If not a mailbox complete MCQE, out by checking mailbox consume */
        if (!bf_get(lpfc_trailer_completed, mcqe))
                goto out_no_mqe_complete;

        /* Get the reference to the active mbox command */
        spin_lock_irqsave(&phba->hbalock, iflags);
        pmb = phba->sli.mbox_active;
        if (unlikely(!pmb)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                "1832 No pending MBOX command to handle\n");
                spin_unlock_irqrestore(&phba->hbalock, iflags);
                goto out_no_mqe_complete;
        }
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        mqe = &pmb->u.mqe;
        pmbox = (MAILBOX_t *)&pmb->u.mqe;
        mbox = phba->mbox;
        vport = pmb->vport;

        /* Reset heartbeat timer */
        phba->last_completion_time = jiffies;
        del_timer(&phba->sli.mbox_tmo);

        /* Move mbox data to caller's mailbox region, do endian swapping */
        if (pmb->mbox_cmpl && mbox)
                lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
        /* Set the mailbox status with SLI4 range 0x4000 */
        mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
        if (mcqe_status != MB_CQE_STATUS_SUCCESS)
                bf_set(lpfc_mqe_status, mqe,
                       (LPFC_MBX_ERROR_RANGE | mcqe_status));

        if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
                pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
                lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
                                      "MBOX dflt rpi: status:x%x rpi:x%x",
                                      mcqe_status,
                                      pmbox->un.varWords[0], 0);
                if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
                        mp = (struct lpfc_dmabuf *)(pmb->context1);
                        ndlp = (struct lpfc_nodelist *)pmb->context2;
                        /* Reg_LOGIN of dflt RPI was successful. Now let's get
                         * rid of the RPI using the same mbox buffer.
                         */
                        lpfc_unreg_login(phba, vport->vpi,
                                         pmbox->un.varWords[0], pmb);
                        pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
                        pmb->context1 = mp;
                        pmb->context2 = ndlp;
                        pmb->vport = vport;
                        rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                        if (rc != MBX_BUSY)
                                lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
                                                LOG_SLI, "0385 rc should "
                                                "have been MBX_BUSY\n");
                        if (rc != MBX_NOT_FINISHED)
                                goto send_current_mbox;
                }
        }
        spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
        phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
        spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);

        /* There is mailbox completion work to do */
        spin_lock_irqsave(&phba->hbalock, iflags);
        __lpfc_mbox_cmpl_put(phba, pmb);
        phba->work_ha |= HA_MBATT;
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        workposted = true;

send_current_mbox:
        spin_lock_irqsave(&phba->hbalock, iflags);
        /* Release the mailbox command posting token */
        phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
        /* Setting active mailbox pointer need to be in sync to flag clear */
        phba->sli.mbox_active = NULL;
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        /* Wake up worker thread to post the next pending mailbox command */
        lpfc_worker_wake_up(phba);
out_no_mqe_complete:
        if (bf_get(lpfc_trailer_consumed, mcqe))
                lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
        return workposted;
}
/**
 * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
 * @phba: Pointer to HBA context object.
 * @cqe: Pointer to mailbox completion queue entry.
 *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox complete handling or asynchronous event handling routine
 * according to the MCQE's async bit.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
{
	struct lpfc_mcqe mcqe;
	bool workposted;

	/* Copy the mailbox MCQE and convert endian order as needed */
	lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));

	/* Invoke the proper event handling routine */
	if (!bf_get(lpfc_trailer_async, &mcqe))
		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
	else
		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
	return workposted;
}
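
/*
 * Editor's note (illustrative): the bf_get()/bf_set() accessors used
 * throughout these handlers are the shift-and-mask helpers from
 * lpfc_hw4.h; conceptually (slightly simplified sketch):
 *
 *	#define bf_get(name, ptr) \
 *		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
 *
 * so bf_get(lpfc_trailer_async, &mcqe) above simply tests the async bit
 * in the MCQE trailer word.
 */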
/**
 * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an ELS work-queue completion event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_iocbq *irspiocbq;
	unsigned long iflags;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];

	/* Get an irspiocbq for later ELS response processing use */
	irspiocbq = lpfc_sli_get_iocbq(phba);
	if (!irspiocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
			"fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n",
			pring->txq_cnt, phba->iocb_cnt,
			phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt,
			phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt);
		return false;
	}

	/* Save off the slow-path queue event for work thread to process */
	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
	spin_lock_irqsave(&phba->hbalock, iflags);
	list_add_tail(&irspiocbq->cq_event.list,
		      &phba->sli4_hba.sp_queue_event);
	phba->hba_flag |= HBA_SP_QUEUE_EVT;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return true;
}
/**
 * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a slow-path WQ entry consumed event by invoking the
 * proper WQ release routine on the slow-path WQ.
 **/
static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_release *wcqe)
{
	/* Check for the slow-path ELS work queue */
	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
	else
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2579 Slow-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
				bf_get(lpfc_wcqe_r_wq_id, wcqe),
				phba->sli4_hba.els_wq->queue_id);
}
/**
 * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to a WQ completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles an XRI abort event.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq,
				   struct sli4_wcqe_xri_aborted *wcqe)
{
	bool workposted = false;
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	/* Allocate a new internal CQ_EVENT entry */
	cq_event = lpfc_sli4_cq_event_alloc(phba);
	if (!cq_event) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0602 Failed to allocate CQ_EVENT entry\n");
		return false;
	}

	/* Move the CQE into the proper xri abort event list */
	memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
	switch (cq->subtype) {
	case LPFC_FCP:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
		/* Set the fcp xri abort event flag */
		phba->hba_flag |= FCP_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case LPFC_ELS:
		spin_lock_irqsave(&phba->hbalock, iflags);
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
		/* Set the els xri abort event flag */
		phba->hba_flag |= ELS_XRI_ABORT_EVENT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0603 Invalid work queue CQE subtype (x%x)\n",
				cq->subtype);
		workposted = false;
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
 * @phba: Pointer to HBA context object.
 * @rcqe: Pointer to receive-queue completion queue entry.
 *
 * This routine processes a receive-queue completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
	bool workposted = false;
	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
	struct hbq_dmabuf *dma_buf;
	uint32_t status;
	unsigned long iflags;

	if (bf_get(lpfc_rcqe_rq_id, rcqe) != hrq->queue_id)
		goto out;

	status = bf_get(lpfc_rcqe_status, rcqe);
	switch (status) {
	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2537 Receive Frame Truncated!!\n");
		/* fall through */
	case FC_STATUS_RQ_SUCCESS:
		lpfc_sli4_rq_release(hrq, drq);
		spin_lock_irqsave(&phba->hbalock, iflags);
		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
		if (!dma_buf) {
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			goto out;
		}
		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
		/* save off the frame for the worker thread to process */
		list_add_tail(&dma_buf->cq_event.list,
			      &phba->sli4_hba.sp_queue_event);
		/* Frame received */
		phba->hba_flag |= HBA_SP_QUEUE_EVT;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	case FC_STATUS_INSUFF_BUF_NEED_BUF:
	case FC_STATUS_INSUFF_BUF_FRM_DISC:
		/* Post more buffers if possible */
		spin_lock_irqsave(&phba->hbalock, iflags);
		phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
		workposted = true;
		break;
	}
out:
	return workposted;
}
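
/*
 * Editor's note (illustrative): receive buffers are posted and consumed
 * as header/data pairs, which is why the success path above releases
 * both queues with one lpfc_sli4_rq_release(hrq, drq) call. Reposting a
 * pair uses the matching put helper (sketch, with hypothetical rqe
 * variables):
 *
 *	lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 */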
/**
 * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to a completion queue entry.
 *
 * This routine processes a slow-path work-queue or receive-queue
 * completion queue entry.
 *
 * Return: true if work posted to worker thread, otherwise false.
 **/
static bool
lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	struct lpfc_cqe cqevt;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_cqe_code, &cqevt)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ/RQ complete event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
				(struct lpfc_wcqe_complete *)&cqevt);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_sp_handle_rel_wcqe(phba,
				(struct lpfc_wcqe_release *)&cqevt);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&cqevt);
		break;
	case CQE_CODE_RECEIVE:
		/* Process the RQ event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_rcqe(phba,
				(struct lpfc_rcqe *)&cqevt);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0388 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_cqe_code, &cqevt));
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to slow-path event queue entry.
 *
 * This routine processes an event queue entry from the slow-path event
 * queue. It will check the MajorCode and MinorCode to determine whether
 * this is for a completion event on a completion queue; if not, an error
 * shall be logged and the routine will just return. Otherwise, it will get
 * to the corresponding completion queue, process all the entries on that
 * completion queue, rearm the completion queue, and then return.
 **/
static void
lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
{
	struct lpfc_queue *cq = NULL, *childq, *speq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	int ecount = 0;
	uint16_t cqid;

	if (bf_get_le32(lpfc_eqe_major_code, eqe) != 0) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0359 Not a valid slow-path completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);

	/* Search for completion queue pointer matching this cqid */
	speq = phba->sli4_hba.sp_eq;
	list_for_each_entry(childq, &speq->child_list, list) {
		if (childq->queue_id == cqid) {
			cq = childq;
			break;
		}
	}
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0365 Slow-path CQ identifier "
					"(%d) does not exist\n", cqid);
		return;
	}

	/* Process all the entries to the CQ */
	switch (cq->type) {
	case LPFC_MCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
			if (!(++ecount % LPFC_GET_QE_REL_INT))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}
		break;
	case LPFC_WCQ:
		while ((cqe = lpfc_sli4_cq_get(cq))) {
			workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe);
			if (!(++ecount % LPFC_GET_QE_REL_INT))
				lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
		}
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0370 Invalid completion queue type (%d)\n",
				cq->type);
		return;
	}

	/* Catch the no cq entry condition, log an error */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0371 No entry from the CQ: identifier "
				"(x%x), type (%d)\n", cq->queue_id, cq->type);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there is work to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
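
/*
 * Editor's note (illustrative): the routine above is the heart of the
 * slow-path SLI-4 interrupt flow. In pseudo-C:
 *
 *	for each EQE popped from the slow-path EQ:
 *		cqid = resource id carried in the EQE
 *		find the child CQ whose queue_id == cqid
 *		for each CQE popped from that CQ:
 *			dispatch by CQE code (mailbox, WQ, RQ, XRI abort)
 *		re-arm the CQ
 *	re-arm the EQ, and wake the worker thread if work was posted
 */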
/**
 * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from the
 * fast-path event queue for FCP command response completion.
 **/
static void
lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq irspiocbq;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	pring->stats.iocb_event++;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	/* Check for response status */
	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
		/* If resource errors reported from HBA, reduce queue
		 * depth of the SCSI device.
		 */
		if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
		     IOSTAT_LOCAL_REJECT) &&
		    (wcqe->parameter == IOERR_NO_RESOURCES)) {
			phba->lpfc_rampdown_queue_depth(phba);
		}
		/* Log the error status */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0373 FCP complete error: status=x%x, "
				"hw_status=x%x, total_data_specified=%d, "
				"parameter=x%x, word3=x%x\n",
				bf_get(lpfc_wcqe_c_status, wcqe),
				bf_get(lpfc_wcqe_c_hw_status, wcqe),
				wcqe->total_data_placed, wcqe->parameter,
				wcqe->word3);
	}

	/* Look up the FCP command IOCB and create pseudo response IOCB */
	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	if (unlikely(!cmdiocbq)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0374 FCP complete with no corresponding "
				"cmdiocb: iotag (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}
	if (unlikely(!cmdiocbq->iocb_cmpl)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0375 FCP cmdiocb not callback function "
				"iotag: (%d)\n",
				bf_get(lpfc_wcqe_c_request_tag, wcqe));
		return;
	}

	/* Fake the irspiocb and copy necessary response information */
	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Pass the cmd_iocb and the rsp state to the upper layer */
	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
}
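
/*
 * Editor's note (design note): unlike the ELS slow path, the FCP fast
 * path above fakes its response IOCB on the stack and invokes the
 * completion callback directly from interrupt context, avoiding both an
 * iocbq allocation and a worker-thread round trip. The callback type
 * (from lpfc_sli.h) is:
 *
 *	void (*iocb_cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
 *			  struct lpfc_iocbq *);
 */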
/**
 * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to completion queue.
 * @wcqe: Pointer to work-queue completion queue entry.
 *
 * This routine handles a fast-path WQ entry consumed event by invoking the
 * proper WQ release routine on the fast-path WQ.
 **/
static void
lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			     struct lpfc_wcqe_release *wcqe)
{
	struct lpfc_queue *childwq;
	bool wqid_matched = false;
	uint16_t fcp_wqid;

	/* Check for fast-path FCP work queue release */
	fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
	list_for_each_entry(childwq, &cq->child_list, list) {
		if (childwq->queue_id == fcp_wqid) {
			lpfc_sli4_wq_release(childwq,
					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
			wqid_matched = true;
			break;
		}
	}
	/* Report warning log message if no match found */
	if (wqid_matched != true)
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2580 Fast-path wqe consume event carries "
				"miss-matched qid: wcqe-qid=x%x\n", fcp_wqid);
}
/**
 * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
 * @phba: Pointer to HBA context object.
 * @cq: Pointer to the completion queue.
 * @cqe: Pointer to fast-path completion queue entry.
 *
 * This routine processes a fast-path work queue completion entry from the
 * fast-path event queue for FCP command response completion.
 **/
static int
lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			 struct lpfc_cqe *cqe)
{
	struct lpfc_wcqe_release wcqe;
	bool workposted = false;

	/* Copy the work queue CQE and convert endian order if needed */
	lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));

	/* Check and process for different type of WCQE and dispatch */
	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
	case CQE_CODE_COMPL_WQE:
		/* Process the WQ complete event */
		phba->last_completion_time = jiffies;
		lpfc_sli4_fp_handle_fcp_wcqe(phba,
				(struct lpfc_wcqe_complete *)&wcqe);
		break;
	case CQE_CODE_RELEASE_WQE:
		/* Process the WQ release event */
		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
				(struct lpfc_wcqe_release *)&wcqe);
		break;
	case CQE_CODE_XRI_ABORTED:
		/* Process the WQ XRI abort event */
		phba->last_completion_time = jiffies;
		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
				(struct sli4_wcqe_xri_aborted *)&wcqe);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0144 Not a valid WCQE code: x%x\n",
				bf_get(lpfc_wcqe_c_code, &wcqe));
		break;
	}
	return workposted;
}
/**
 * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
 * @phba: Pointer to HBA context object.
 * @eqe: Pointer to fast-path event queue entry.
 * @fcp_cqidx: Index of the fast-path FCP completion queue.
 *
 * This routine processes an event queue entry from the fast-path event
 * queue. It will check the MajorCode and MinorCode to determine whether
 * this is for a completion event on a completion queue; if not, an error
 * shall be logged and the routine will just return. Otherwise, it will get
 * to the corresponding completion queue, process all the entries on the
 * completion queue, rearm the completion queue, and then return.
 **/
static void
lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
			uint32_t fcp_cqidx)
{
	struct lpfc_queue *cq;
	struct lpfc_cqe *cqe;
	bool workposted = false;
	uint16_t cqid;
	int ecount = 0;

	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0366 Not a valid fast-path completion "
				"event: majorcode=x%x, minorcode=x%x\n",
				bf_get_le32(lpfc_eqe_major_code, eqe),
				bf_get_le32(lpfc_eqe_minor_code, eqe));
		return;
	}

	cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
	if (unlikely(!cq)) {
		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0367 Fast-path completion queue "
					"does not exist\n");
		return;
	}

	/* Get the reference to the corresponding CQ */
	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
	if (unlikely(cqid != cq->queue_id)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0368 Miss-matched fast-path completion "
				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
				cqid, cq->queue_id);
		return;
	}

	/* Process all the entries to the CQ */
	while ((cqe = lpfc_sli4_cq_get(cq))) {
		workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
		if (!(++ecount % LPFC_GET_QE_REL_INT))
			lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
	}

	/* Catch the no cq entry condition */
	if (unlikely(ecount == 0))
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0369 No entry from fast-path completion "
				"queue fcpcqid=%d\n", cq->queue_id);

	/* In any case, flush and re-arm the CQ */
	lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);

	/* wake up worker thread if there is work to be done */
	if (workposted)
		lpfc_worker_wake_up(phba);
}
static void
lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe;

	/* walk all the EQ entries and drop on the floor */
	while ((eqe = lpfc_sli4_eq_get(eq)))
		;

	/* Clear and re-arm the EQ */
	lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
}
/**
 * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when a device with the SLI-4 interface spec is enabled
 * with MSI-X multi-message interrupt mode and there are slow-path events in
 * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
 * interrupt mode, this function is called as part of the device-level
 * interrupt handler. When the PCI slot is in error recovery or the HBA is
 * undergoing initialization, the interrupt handler will not process the
 * interrupt. The link attention and ELS ring attention events are handled
 * by the worker thread. The interrupt handler signals the worker thread
 * and returns for these events. This function is called without any lock
 * held. It gets the hbalock to access and update SLI data structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_queue *speq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;

	/*
	 * Get the driver's phba structure from the dev_id
	 */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	speq = phba->sli4_hba.sp_eq;

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, speq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on the slow-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(speq))) {
		lpfc_sli4_sp_handle_eqe(phba, eqe);
		if (!(++ecount % LPFC_GET_QE_REL_INT))
			lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
	}

	/* Always clear and re-arm the slow-path EQ */
	lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);

	/* Catch the no cq entry condition */
	if (unlikely(ecount == 0)) {
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0357 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_sp_intr_handler */
/**
 * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is directly called from the PCI layer as an interrupt
 * service routine when a device with the SLI-4 interface spec is enabled
 * with MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
 * ring event in the HBA. However, when the device is enabled with either
 * MSI or Pin-IRQ interrupt mode, this function is called as part of the
 * device-level interrupt handler. When the PCI slot is in error recovery
 * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. The SCSI FCP fast-path ring events are handled in
 * interrupt context. This function is called without any lock held.
 * It gets the hbalock to access and update SLI data structures. Note that
 * the FCP EQs and FCP CQs are mapped one-to-one, such that the FCP EQ index
 * is equal to the FCP CQ index.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
	struct lpfc_queue *fpeq;
	struct lpfc_eqe *eqe;
	unsigned long iflag;
	int ecount = 0;
	uint32_t fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
	phba = fcp_eq_hdl->phba;
	fcp_eqidx = fcp_eq_hdl->idx;

	if (unlikely(!phba))
		return IRQ_NONE;

	/* Get to the EQ struct associated with this vector */
	fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];

	/* Check device state for handling interrupt */
	if (unlikely(lpfc_intr_state_check(phba))) {
		/* Check again for link_state with lock held */
		spin_lock_irqsave(&phba->hbalock, iflag);
		if (phba->link_state < LPFC_LINK_DOWN)
			/* Flush, clear interrupt, and rearm the EQ */
			lpfc_sli4_eq_flush(phba, fpeq);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return IRQ_NONE;
	}

	/*
	 * Process all the events on the FCP fast-path EQ
	 */
	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
		lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
		if (!(++ecount % LPFC_GET_QE_REL_INT))
			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
	}

	/* Always clear and re-arm the fast-path EQ */
	lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);

	if (unlikely(ecount == 0)) {
		if (phba->intr_type == MSIX)
			/* MSI-X treated interrupt served as no EQ share INT */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0358 MSI-X interrupt with no EQE\n");
		else
			/* Non MSI-X treated on interrupt as EQ share INT */
			return IRQ_NONE;
	}

	return IRQ_HANDLED;
} /* lpfc_sli4_fp_intr_handler */
/**
 * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
 * @irq: Interrupt number.
 * @dev_id: The device context pointer.
 *
 * This function is the device-level interrupt handler for a device with the
 * SLI-4 interface spec, called from the PCI layer when either MSI or Pin-IRQ
 * interrupt mode is enabled and there is an event in the HBA which requires
 * driver attention. This function invokes the slow-path interrupt attention
 * handling function and fast-path interrupt attention handling function in
 * turn to process the relevant HBA attention events. This function is called
 * without any lock held. It gets the hbalock to access and update SLI data
 * structures.
 *
 * This function returns IRQ_HANDLED when the interrupt is handled, else it
 * returns IRQ_NONE.
 **/
irqreturn_t
lpfc_sli4_intr_handler(int irq, void *dev_id)
{
	struct lpfc_hba *phba;
	irqreturn_t sp_irq_rc, fp_irq_rc;
	bool fp_handled = false;
	uint32_t fcp_eqidx;

	/* Get the driver's phba structure from the dev_id */
	phba = (struct lpfc_hba *)dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	/*
	 * Invoke slow-path host attention interrupt handling as appropriate.
	 */
	sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);

	/*
	 * Invoke fast-path host attention interrupt handling as appropriate.
	 */
	for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
		fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
					&phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
		if (fp_irq_rc == IRQ_HANDLED)
			fp_handled |= true;
	}

	return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
} /* lpfc_sli4_intr_handler */
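
/*
 * Editor's note (illustrative sketch): under INTx or MSI, the
 * device-level handler above is the one registered with the kernel,
 * e.g. (flags as the driver would typically use for a shared line):
 *
 *	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
 *			 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
 *
 * whereas MSI-X mode instead registers lpfc_sli4_sp_intr_handler plus
 * one lpfc_sli4_fp_intr_handler per fast-path FCP event queue vector.
 */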
/**
 * lpfc_sli4_queue_free - free a queue structure and associated memory
 * @queue: The queue structure to free.
 *
 * This function frees a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called after destroying the
 * queue on the HBA.
 **/
void
lpfc_sli4_queue_free(struct lpfc_queue *queue)
{
	struct lpfc_dmabuf *dmabuf;

	if (!queue)
		return;

	while (!list_empty(&queue->page_list)) {
		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
				 list);
		dma_free_coherent(&queue->phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	kfree(queue);
	return;
}
/**
 * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
 * @phba: The HBA that this queue is being created on.
 * @entry_size: The size of each queue entry for this queue.
 * @entry_count: The number of entries that this queue will handle.
 *
 * This function allocates a queue structure and the DMAable memory used for
 * the host resident queue. This function must be called before creating the
 * queue on the HBA.
 **/
struct lpfc_queue *
lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
		      uint32_t entry_count)
{
	struct lpfc_queue *queue;
	struct lpfc_dmabuf *dmabuf;
	int x, total_qe_count;
	void *dma_pointer;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	queue = kzalloc(sizeof(struct lpfc_queue) +
			(sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
	if (!queue)
		return NULL;
	queue->page_count = (ALIGN(entry_size * entry_count,
			hw_page_size))/hw_page_size;
	INIT_LIST_HEAD(&queue->list);
	INIT_LIST_HEAD(&queue->page_list);
	INIT_LIST_HEAD(&queue->child_list);
	for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!dmabuf)
			goto out_fail;
		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
						  hw_page_size, &dmabuf->phys,
						  GFP_KERNEL);
		if (!dmabuf->virt) {
			kfree(dmabuf);
			goto out_fail;
		}
		memset(dmabuf->virt, 0, hw_page_size);
		dmabuf->buffer_tag = x;
		list_add_tail(&dmabuf->list, &queue->page_list);
		/* initialize queue's entry array */
		dma_pointer = dmabuf->virt;
		for (; total_qe_count < entry_count &&
		     dma_pointer < (hw_page_size + dmabuf->virt);
		     total_qe_count++, dma_pointer += entry_size) {
			queue->qe[total_qe_count].address = dma_pointer;
		}
	}
	queue->entry_size = entry_size;
	queue->entry_count = entry_count;
	queue->phba = phba;

	return queue;
out_fail:
	lpfc_sli4_queue_free(queue);
	return NULL;
}
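
/*
 * Editor's note (illustrative sketch): queue objects follow an
 * alloc -> create -> destroy -> free lifecycle. For an event queue,
 * and assuming the entry size/count fields the driver keeps in
 * sli4_hba, setup and teardown look roughly like:
 *
 *	struct lpfc_queue *eq;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
 *				   phba->sli4_hba.eq_ecount);
 *	if (!eq || lpfc_eq_create(phba, eq, phba->cfg_fcp_imax))
 *		goto fail;
 *	...
 *	lpfc_eq_destroy(phba, eq);
 *	lpfc_sli4_queue_free(eq);
 */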
/**
 * lpfc_eq_create - Create an Event Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @eq: The queue structure to use to create the event queue.
 * @imax: The maximum interrupt per second limit.
 *
 * This function creates an event queue, as detailed in @eq, on a port,
 * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @eq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the EQ_CREATE mailbox command to the HBA to setup the
 * event queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
{
	struct lpfc_mbx_eq_create *eq_create;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	struct lpfc_dmabuf *dmabuf;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint16_t dmult;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	eq_create = &mbox->u.mqe.un.eq_create;
	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
	       eq->page_count);
	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
	       LPFC_EQE_SIZE);
	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
	/* Calculate delay multiplier from maximum interrupt per second */
	dmult = LPFC_DMULT_CONST/imax - 1;
	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
	       dmult);
	switch (eq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0360 Unsupported EQ count. (%d)\n",
				eq->entry_count);
		if (eq->entry_count < 256)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_1024);
		break;
	case 2048:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_2048);
		break;
	case 4096:
		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
		       LPFC_EQ_CNT_4096);
		break;
	}
	list_for_each_entry(dmabuf, &eq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	mbox->context1 = NULL;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2500 EQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	eq->type = LPFC_EQ;
	eq->subtype = LPFC_NONE;
	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
	if (eq->queue_id == 0xFFFF)
		status = -ENXIO;
	eq->host_index = 0;
	eq->hba_index = 0;

	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
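
/*
 * Editor's note (worked example, assuming LPFC_DMULT_CONST is 651042 as
 * defined in lpfc_hw4.h of this vintage): the delay multiplier written
 * into the EQ context is dmult = LPFC_DMULT_CONST/imax - 1, so for an
 * imax cap of 10000 interrupts per second:
 *
 *	dmult = 651042 / 10000 - 1 = 64
 *
 * i.e. larger dmult values throttle the interrupt rate further.
 */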
/**
 * lpfc_cq_create - Create a Completion Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @cq: The queue structure to use to create the completion queue.
 * @eq: The event queue to bind this completion queue to.
 * @type: The queue's type.
 * @subtype: The queue's subtype.
 *
 * This function creates a completion queue, as detailed in @cq, on a port,
 * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @cq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @eq
 * is used to indicate which event queue to bind this completion queue to. This
 * function will send the CQ_CREATE mailbox command to the HBA to setup the
 * completion queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
{
	struct lpfc_mbx_cq_create *cq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	cq_create = &mbox->u.mqe.un.cq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
	       cq->page_count);
	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.cqv);
	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
		       eq->queue_id);
	} else {
		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
		       eq->queue_id);
	}
	switch (cq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0361 Unsupported CQ count. (%d)\n",
				cq->entry_count);
		if (cq->entry_count < 256)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 256:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_256);
		break;
	case 512:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_512);
		break;
	case 1024:
		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
		       LPFC_CQ_CNT_1024);
		break;
	}
	list_for_each_entry(dmabuf, &cq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2501 CQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	if (cq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	/* link the cq onto the parent eq child list */
	list_add_tail(&cq->list, &eq->child_list);
	/* Set up completion queue's type and subtype */
	cq->type = type;
	cq->subtype = subtype;
	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
	cq->assoc_qid = eq->queue_id;
	cq->host_index = 0;
	cq->hba_index = 0;

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
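
/*
 * Editor's note (illustrative): each SLI4 config mailbox reports status
 * twice: the mailbox return code (rc) and the IOCTL status embedded in
 * the config subheader. The create/destroy routines in this file all
 * share the same checking pattern:
 *
 *	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
 *	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 *	if (shdr_status || shdr_add_status || rc)
 *		status = -ENXIO;
 */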
/**
 * lpfc_mq_create_fb_init - Send MCC_CREATE without async events registration
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @mbox: An allocated pointer to type LPFC_MBOXQ_t
 * @cq: The completion queue to associate with this mq.
 *
 * This function provides failback (fb) functionality when the
 * mq_create_ext fails on older FW generations. Its purpose is identical
 * to mq_create_ext otherwise.
 *
 * This routine cannot fail as all attributes were previously accessed and
 * initialized in mq_create_ext.
 **/
static void
lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_dmabuf *dmabuf;
	int length;

	length = (sizeof(struct lpfc_mbx_mq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	mq_create = &mbox->u.mqe.un.mq_create;
	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
	       mq->page_count);
	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
	switch (mq->entry_count) {
	case 16:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
			putPaddrLow(dmabuf->phys);
		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
			putPaddrHigh(dmabuf->phys);
	}
}
/**
 * lpfc_mq_create - Create a mailbox Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @mq: The queue structure to use to create the mailbox queue.
 * @cq: The completion queue to associate with this mq.
 * @subtype: The queue's subtype.
 *
 * This function creates a mailbox queue, as detailed in @mq, on a port,
 * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @mq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. This
 * function will send the MQ_CREATE mailbox command to the HBA to setup the
 * mailbox queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
int32_t
lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_mq_create *mq_create;
	struct lpfc_mbx_mq_create_ext *mq_create_ext;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
			 length, LPFC_SLI4_MBX_EMBED);

	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
	bf_set(lpfc_mbx_mq_create_ext_num_pages,
	       &mq_create_ext->u.request, mq->page_count);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
	       &mq_create_ext->u.request, 1);
	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.mqv);
	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
		       cq->queue_id);
	else
		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
		       cq->queue_id);
	switch (mq->entry_count) {
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0362 Unsupported MQ count. (%d)\n",
				mq->entry_count);
		if (mq->entry_count < 16)
			return -EINVAL;
		/* otherwise default to smallest count (drop through) */
	case 16:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_16);
		break;
	case 32:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_32);
		break;
	case 64:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_64);
		break;
	case 128:
		bf_set(lpfc_mq_context_ring_size,
		       &mq_create_ext->u.request.context,
		       LPFC_MQ_RING_SIZE_128);
		break;
	}
	list_for_each_entry(dmabuf, &mq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
			      &mq_create_ext->u.response);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2795 MQ_CREATE_EXT failed with "
				"status x%x. Failback to MQ_CREATE.\n",
				rc);
		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
		mq_create = &mbox->u.mqe.un.mq_create;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
				      &mq_create->u.response);
	}

	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2502 MQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	if (mq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	mq->type = LPFC_MQ;
	mq->assoc_qid = cq->queue_id;
	mq->subtype = subtype;
	mq->host_index = 0;
	mq->hba_index = 0;

	/* link the mq onto the parent cq child list */
	list_add_tail(&mq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
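
/*
 * Editor's note (design note): MQ_CREATE_EXT is attempted first because
 * it registers for the async event groups (link, FIP, group5, FC, SLI)
 * in the same command. If older firmware rejects it, the code falls
 * back to the plain MQ_CREATE built by lpfc_mq_create_fb_init() and
 * reissues the mailbox, so the queue still comes up, just without the
 * extended event registration.
 */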
/**
 * lpfc_wq_create - Create a Work Queue on the HBA
 * @phba: HBA structure that indicates port to create a queue on.
 * @wq: The queue structure to use to create the work queue.
 * @cq: The completion queue to bind this work queue to.
 * @subtype: The subtype of the work queue indicating its functionality.
 *
 * This function creates a work queue, as detailed in @wq, on a port, described
 * by @phba by sending a WQ_CREATE mailbox command to the HBA.
 *
 * The @phba struct is used to send mailbox command to HBA. The @wq struct
 * is used to get the entry count and entry size that are necessary to
 * determine the number of pages to allocate and use for this queue. The @cq
 * is used to indicate which completion queue to bind this work queue to. This
 * function will send the WQ_CREATE mailbox command to the HBA to setup the
 * work queue. This function is synchronous and will wait for the mailbox
 * command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
	       struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_wq_create *wq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
	struct dma_address *page;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	wq_create = &mbox->u.mqe.un.wq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
	       wq->page_count);
	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
	       cq->queue_id);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.wqv);
	if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
		       wq->entry_count);
		switch (wq->entry_size) {
		default:
		case 64:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_64);
			break;
		case 128:
			bf_set(lpfc_mbx_wq_create_wqe_size,
			       &wq_create->u.request_1,
			       LPFC_WQ_WQE_SIZE_128);
			break;
		}
		bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
		       (PAGE_SIZE/SLI4_PAGE_SIZE));
		page = wq_create->u.request_1.page;
	} else {
		page = wq_create->u.request.page;
	}
	list_for_each_entry(dmabuf, &wq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2503 WQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
	if (wq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	wq->type = LPFC_WQ;
	wq->assoc_qid = cq->queue_id;
	wq->subtype = subtype;
	wq->host_index = 0;
	wq->hba_index = 0;

	/* link the wq onto the parent cq child list */
	list_add_tail(&wq->list, &cq->child_list);
out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}
10914 * lpfc_rq_create - Create a Receive Queue on the HBA
10915 * @phba: HBA structure that indicates port to create a queue on.
10916 * @hrq: The queue structure to use to create the header receive queue.
10917 * @drq: The queue structure to use to create the data receive queue.
10918 * @cq: The completion queue to bind this work queue to.
10920 * This function creates a receive buffer queue pair , as detailed in @hrq and
10921 * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
10924 * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
10925 * struct is used to get the entry count that is necessary to determine the
10926 * number of pages to use for this queue. The @cq is used to indicate which
10927 * completion queue to bind received buffers that are posted to these queues to.
10928 * This function will send the RQ_CREATE mailbox command to the HBA to setup the
10929 * receive queue pair. This function is asynchronous and will wait for the
 * mailbox command to finish before continuing.
 *
 * On success this function will return a zero. If unable to allocate enough
 * memory this function will return -ENOMEM. If the queue create mailbox command
 * fails this function will return -ENXIO.
 **/
uint32_t
lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
{
	struct lpfc_mbx_rq_create *rq_create;
	struct lpfc_dmabuf *dmabuf;
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;

	if (!phba->sli4_hba.pc_sli4_params.supported)
		hw_page_size = SLI4_PAGE_SIZE;

	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_create) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	rq_create = &mbox->u.mqe.un.rq_create;
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       hrq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
	} else {
		switch (hrq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2535 Unsupported RQ count. (%d)\n",
					hrq->entry_count);
			if (hrq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (drop through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_HDR_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       hrq->page_count);
	list_for_each_entry(dmabuf, &hrq->page_list, list) {
		memset(dmabuf->virt, 0, hw_page_size);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2504 RQ_CREATE mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
		goto out;
	}
	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (hrq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	hrq->type = LPFC_HRQ;
	hrq->assoc_qid = cq->queue_id;
	hrq->subtype = subtype;
	hrq->host_index = 0;
	hrq->hba_index = 0;

	/* now create the data queue */
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbox_hdr_version, &shdr->request,
	       phba->sli4_hba.pc_sli4_params.rqv);
	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
		bf_set(lpfc_rq_context_rqe_count_1,
		       &rq_create->u.request.context,
		       drq->entry_count);
		rq_create->u.request.context.buffer_size = LPFC_DATA_BUF_SIZE;
	} else {
		switch (drq->entry_count) {
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2536 Unsupported RQ count. (%d)\n",
					drq->entry_count);
			if (drq->entry_count < 512) {
				status = -EINVAL;
				goto out;
			}
			/* otherwise default to smallest count (drop through) */
		case 512:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_512);
			break;
		case 1024:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_1024);
			break;
		case 2048:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_2048);
			break;
		case 4096:
			bf_set(lpfc_rq_context_rqe_count,
			       &rq_create->u.request.context,
			       LPFC_RQ_RING_SIZE_4096);
			break;
		}
		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
		       LPFC_DATA_BUF_SIZE);
	}
	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
	       cq->queue_id);
	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
	       drq->page_count);
	list_for_each_entry(dmabuf, &drq->page_list, list) {
		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
					putPaddrLow(dmabuf->phys);
		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
					putPaddrHigh(dmabuf->phys);
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		status = -ENXIO;
		goto out;
	}
	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
	if (drq->queue_id == 0xFFFF) {
		status = -ENXIO;
		goto out;
	}
	drq->type = LPFC_DRQ;
	drq->assoc_qid = cq->queue_id;
	drq->subtype = subtype;
	drq->host_index = 0;
	drq->hba_index = 0;

	/* link the header and data RQs onto the parent cq child list */
	list_add_tail(&hrq->list, &cq->child_list);
	list_add_tail(&drq->list, &cq->child_list);

out:
	mempool_free(mbox, phba->mbox_mem_pool);
	return status;
}

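/*
 * Illustrative sketch (not part of the driver build): during queue setup the
 * header/data RQ pair is typically created against the ELS completion queue,
 * roughly as below. The exact queue pointers follow the lpfc_sli4_hba layout
 * used elsewhere in this driver and should be read as an assumption:
 *
 *	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq,
 *			    phba->sli4_hba.dat_rq,
 *			    phba->sli4_hba.els_cq, LPFC_USOL);
 *	if (rc)
 *		goto out_destroy_els_cq;	// hypothetical unwind label
 */
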
/**
 * lpfc_eq_destroy - Destroy an event Queue on the HBA
 * @eq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @eq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @eq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!eq)
		return -ENODEV;
	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_eq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_EQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
	       eq->queue_id);
	mbox->vport = eq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;

	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2505 EQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}

	/* Remove eq from any list */
	list_del_init(&eq->list);
	mempool_free(mbox, eq->phba->mbox_mem_pool);
	return status;
}

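/*
 * Note (schematic sketch, not driver code): every lpfc_*_destroy routine in
 * this group follows the same embedded-mailbox shape; only the subsystem,
 * opcode, request union member, and queue-id field differ:
 *
 *	mbox = mempool_alloc(q->phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_sli4_config(phba, mbox, <subsystem>, <DESTROY opcode>,
 *			 length, LPFC_SLI4_MBX_EMBED);
 *	bf_set(<q_id field>, &mbox->u.mqe.un.<req>.u.request, q->queue_id);
 *	rc = lpfc_sli_issue_mbox(q->phba, mbox, MBX_POLL);
 *	// then decode shdr_status/shdr_add_status from the cfg_shdr
 *
 * The angle-bracket placeholders are schematic, not real identifiers.
 */
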
/**
 * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
 * @cq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @cq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @cq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!cq)
		return -ENODEV;
	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_cq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_CQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
	       cq->queue_id);
	mbox->vport = cq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2506 CQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove cq from any list */
	list_del_init(&cq->list);
	mempool_free(mbox, cq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
 * @mq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @mq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @mq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!mq)
		return -ENODEV;
	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_mq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_MQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
	       mq->queue_id);
	mbox->vport = mq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2507 MQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove mq from any list */
	list_del_init(&mq->list);
	mempool_free(mbox, mq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_wq_destroy - Destroy a Work Queue on the HBA
 * @wq: The queue structure associated with the queue to destroy.
 *
 * This function destroys a queue, as detailed in @wq by sending a mailbox
 * command, specific to the type of queue, to the HBA.
 *
 * The @wq struct is used to get the queue ID of the queue to destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!wq)
		return -ENODEV;
	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_wq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
	       wq->queue_id);
	mbox->vport = wq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2508 WQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	/* Remove wq from any list */
	list_del_init(&wq->list);
	mempool_free(mbox, wq->phba->mbox_mem_pool);
	return status;
}

/**
 * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
 * @hrq: The header receive queue to destroy.
 * @drq: The data receive queue to destroy.
 *
 * This function destroys the header and data receive queues, as detailed in
 * @hrq and @drq, by sending a mailbox command, specific to the type of queue,
 * to the HBA for each.
 *
 * The @hrq and @drq structs are used to get the queue IDs of the queues to
 * destroy.
 *
 * On success this function will return a zero. If the queue destroy mailbox
 * command fails this function will return -ENXIO.
 **/
uint32_t
lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
		struct lpfc_queue *drq)
{
	LPFC_MBOXQ_t *mbox;
	int rc, length, status = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (!hrq || !drq)
		return -ENODEV;
	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	length = (sizeof(struct lpfc_mbx_rq_destroy) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
			 length, LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       hrq->queue_id);
	mbox->vport = hrq->phba->pport;
	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2509 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, hrq->phba->mbox_mem_pool);
		return -ENXIO;
	}
	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
	       drq->queue_id);
	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2510 RQ_DESTROY mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		status = -ENXIO;
	}
	list_del_init(&hrq->list);
	list_del_init(&drq->list);
	mempool_free(mbox, hrq->phba->mbox_mem_pool);
	return status;
}

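/*
 * Illustrative sketch (an assumption about the caller, inferred from how the
 * create routines are paired): teardown walks the queue hierarchy
 * children-first, i.e. work/receive queues before their parent CQs and EQs.
 * A caller shaped like lpfc_sli4_queue_unset() would do roughly:
 *
 *	lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
 *	lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
 *	lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
 *	lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
 */
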
/**
 * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
 * @phba: pointer to lpfc hba data structure.
 * @pdma_phys_addr0: Physical address of the 1st SGL page.
 * @pdma_phys_addr1: Physical address of the 2nd SGL page.
 * @xritag: the xritag that ties this io to the SGL pages.
 *
 * This routine will post the sgl pages for the IO that has the xritag
 * that is in the iocbq structure. The xritag is assigned during iocbq
 * creation and persists for as long as the driver is loaded.
 * If the caller has fewer than 256 scatter gather segments to map then
 * pdma_phys_addr1 should be 0.
 * If the caller needs to map more than 256 scatter gather segments then
 * pdma_phys_addr1 should be a valid physical address.
 * Physical addresses for SGLs must be 64 byte aligned.
 * If two SGLs are mapped then the first one must have 256 entries and
 * the second SGL can have between 1 and 256 entries.
 *
 * Return codes:
 * 	0 - Success
 * 	-ENXIO, -ENOMEM - Failure
 **/
int
lpfc_sli4_post_sgl(struct lpfc_hba *phba,
		dma_addr_t pdma_phys_addr0,
		dma_addr_t pdma_phys_addr1,
		uint16_t xritag)
{
	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	if (xritag == NO_XRI) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0364 Invalid param:\n");
		return -EINVAL;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
			sizeof(struct lpfc_mbx_post_sgl_pages) -
			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);

	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
				&mbox->u.mqe.un.post_sgl_pages;
	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));

	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mbox, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2511 POST_SGL mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

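/*
 * Illustrative sketch (hypothetical caller): posting the SGL for one XRI,
 * with a second page only when more than 256 segments must be mapped. The
 * variable names here are placeholders, not driver identifiers:
 *
 *	rc = lpfc_sli4_post_sgl(phba, pdma_phys_sgl0,
 *				use_two_pages ? pdma_phys_sgl1 : 0,
 *				iocbq->sli4_xritag);
 */
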
/**
 * lpfc_sli4_next_xritag - Get an xritag for the io
 * @phba: Pointer to HBA context object.
 *
 * This function gets an xritag for the iocb. If there is no unused xritag
 * it will return NO_XRI (0xffff); zero is not a valid xritag.
 * The function returns the allocated xritag if successful, else NO_XRI.
 * The caller is not required to hold any lock.
 **/
uint16_t
lpfc_sli4_next_xritag(struct lpfc_hba *phba)
{
	uint16_t xritag;

	spin_lock_irq(&phba->hbalock);
	xritag = phba->sli4_hba.next_xri;
	if ((xritag != (uint16_t) -1) && xritag <
		(phba->sli4_hba.max_cfg_param.max_xri
			+ phba->sli4_hba.max_cfg_param.xri_base)) {
		phba->sli4_hba.next_xri++;
		phba->sli4_hba.max_cfg_param.xri_used++;
		spin_unlock_irq(&phba->hbalock);
		return xritag;
	}
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
			"2004 Failed to allocate XRI.last XRITAG is %d"
			" Max XRI is %d, Used XRI is %d\n",
			phba->sli4_hba.next_xri,
			phba->sli4_hba.max_cfg_param.max_xri,
			phba->sli4_hba.max_cfg_param.xri_used);
	return NO_XRI;
}

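/*
 * Illustrative sketch (hypothetical caller): the NO_XRI sentinel must be
 * checked before the tag is used, since exchange exhaustion is reported
 * in-band rather than by an error code:
 *
 *	xritag = lpfc_sli4_next_xritag(phba);
 *	if (xritag == NO_XRI)
 *		return IOCB_BUSY;	// back off; no exchange available
 */
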
/**
 * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post a block of driver's sgl pages to the
 * HBA using non-embedded mailbox command. No Lock is held. This routine
 * is only called when the driver is loading and after all IO has been
 * stopped.
 **/
int
lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int els_xri_cnt, rc = 0;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The number of sgls to be posted */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2559 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2560 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
			 LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0285 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
		sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
				cpu_to_le32(putPaddrLow(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg0_addr_hi =
				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
		sgl_pg_pairs->sgl_pg1_addr_lo =
				cpu_to_le32(putPaddrLow(0));
		sgl_pg_pairs->sgl_pg1_addr_hi =
				cpu_to_le32(putPaddrHigh(0));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = sglq_entry->sli4_xritag;
		sgl_pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, els_xri_cnt);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2513 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

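/*
 * Illustrative sketch (assumption about the call site): this block post is
 * driven once from HBA bring-up, after the ELS sgl array has been populated,
 * along the lines of:
 *
 *	rc = lpfc_sli4_post_sgl_list(phba);
 *	if (unlikely(rc))
 *		goto out_free_mbox;	// hypothetical unwind label
 */
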
/**
 * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
 * @phba: pointer to lpfc hba data structure.
 * @sblist: pointer to scsi buffer list.
 * @cnt: number of scsi buffers on the list.
 *
 * This routine is invoked to post a block of @cnt scsi sgl pages from a
 * SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
 * No Lock is held.
 **/
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
			      int cnt)
{
	struct lpfc_scsi_buf *psb;
	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
	struct sgl_page_pairs *sgl_pg_pairs;
	void *viraddr;
	LPFC_MBOXQ_t *mbox;
	uint32_t reqlen, alloclen, pg_pairs;
	uint32_t mbox_tmo;
	uint16_t xritag_start = 0;
	int rc = 0;
	uint32_t shdr_status, shdr_add_status;
	dma_addr_t pdma_phys_bpl1;
	union lpfc_sli4_cfg_shdr *shdr;

	/* Calculate the requested length of the dma memory */
	reqlen = cnt * sizeof(struct sgl_page_pairs) +
		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
	if (reqlen > SLI4_PAGE_SIZE) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0217 Block sgl registration required DMA "
				"size (%d) great than a page\n", reqlen);
		return -ENOMEM;
	}
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0283 Failed to allocate mbox cmd memory\n");
		return -ENOMEM;
	}

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
				LPFC_SLI4_MBX_NEMBED);

	if (alloclen < reqlen) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2561 Allocated DMA memory size (%d) is "
				"less than the requested DMA memory "
				"size (%d)\n", alloclen, reqlen);
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* Get the first SGE entry from the non-embedded DMA memory */
	viraddr = mbox->sge_array->addr[0];

	/* Set up the SGL pages in the non-embedded DMA pages */
	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
	sgl_pg_pairs = &sgl->sgl_pg_pairs;

	pg_pairs = 0;
	list_for_each_entry(psb, sblist, list) {
		/* Set up the sge entry */
		sgl_pg_pairs->sgl_pg0_addr_lo =
			cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
		sgl_pg_pairs->sgl_pg0_addr_hi =
			cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
			pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
		else
			pdma_phys_bpl1 = 0;
		sgl_pg_pairs->sgl_pg1_addr_lo =
			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
		sgl_pg_pairs->sgl_pg1_addr_hi =
			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
		/* Keep the first xritag on the list */
		if (pg_pairs == 0)
			xritag_start = psb->cur_iocbq.sli4_xritag;
		sgl_pg_pairs++;
		pg_pairs++;
	}
	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
	/* Perform endian conversion if necessary */
	sgl->word0 = cpu_to_le32(sgl->word0);

	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
	}
	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		lpfc_sli4_mbox_cmd_free(phba, mbox);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2564 POST_SGL_BLOCK mailbox command failed "
				"status x%x add_status x%x mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}

/**
 * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
 * @phba: pointer to lpfc_hba struct that the frame was received on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function checks the fields in the @fc_hdr to see if the FC frame is a
 * valid type of frame that the LPFC driver will handle. This function will
 * return a zero if the frame is a valid frame or a non zero value when the
 * frame does not pass the check.
 **/
static int
lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
{
	/* make rctl_names static to save stack space */
	static char *rctl_names[] = FC_RCTL_NAMES_INIT;
	char *type_names[] = FC_TYPE_NAMES_INIT;
	struct fc_vft_header *fc_vft_hdr;
	uint32_t *header = (uint32_t *) fc_hdr;

	switch (fc_hdr->fh_r_ctl) {
	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
	case FC_RCTL_DD_CMD_STATUS:	/* command status */
	case FC_RCTL_ELS_REQ:	/* extended link services request */
	case FC_RCTL_ELS_REP:	/* extended link services reply */
	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
	case FC_RCTL_BA_NOP:  	/* basic link service NOP */
	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
	case FC_RCTL_BA_RMC: 	/* remove connection */
	case FC_RCTL_BA_ACC:	/* basic accept */
	case FC_RCTL_BA_RJT:	/* basic reject */
	case FC_RCTL_BA_PRMT:
	case FC_RCTL_ACK_1:	/* acknowledge_1 */
	case FC_RCTL_ACK_0:	/* acknowledge_0 */
	case FC_RCTL_P_RJT:	/* port reject */
	case FC_RCTL_F_RJT:	/* fabric reject */
	case FC_RCTL_P_BSY:	/* port busy */
	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
	case FC_RCTL_LCR:	/* link credit reset */
	case FC_RCTL_END:	/* end */
		break;
	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
		return lpfc_fc_frame_check(phba, fc_hdr);
	default:
		goto drop;
	}
	switch (fc_hdr->fh_type) {
	case FC_TYPE_BLS:
	case FC_TYPE_ELS:
	case FC_TYPE_FCP:
	case FC_TYPE_CT:
		break;
	case FC_TYPE_IP:
	case FC_TYPE_ILS:
	default:
		goto drop;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"2538 Received frame rctl:%s type:%s "
			"Frame Data:%08x %08x %08x %08x %08x %08x\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type],
			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
			be32_to_cpu(header[4]), be32_to_cpu(header[5]));
	return 0;
drop:
	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
			"2539 Dropped frame rctl:%s type:%s\n",
			rctl_names[fc_hdr->fh_r_ctl],
			type_names[fc_hdr->fh_type]);
	return 1;
}

/**
 * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 *
 * This function processes the FC header to retrieve the VFI from the VF
 * header, if one exists. This function will return the VFI if one exists
 * or 0 if no VSAN Header exists.
 **/
static uint32_t
lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
{
	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;

	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
		return 0;
	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
}

/**
 * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
 * @phba: Pointer to the HBA structure to search for the vport on
 * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
 * @fcfi: The FC Fabric ID that the frame came from
 *
 * This function searches the @phba for a vport that matches the content of the
 * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
 * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
 * returns the matching vport pointer or NULL if unable to match frame to a
 * vport.
 **/
static struct lpfc_vport *
lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
		       uint16_t fcfi)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport = NULL;
	int i;
	uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
			fc_hdr->fh_d_id[1] << 8 |
			fc_hdr->fh_d_id[2]);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			if (phba->fcf.fcfi == fcfi &&
			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
			    vports[i]->fc_myDID == did) {
				vport = vports[i];
				break;
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	return vport;
}

/**
 * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
 * @vport: The vport to work on.
 *
 * This function updates the receive sequence time stamp for this vport. The
 * receive sequence time stamp indicates the time that the last frame of the
 * sequence that has been idle for the longest amount of time was received.
 * The driver uses this time stamp to indicate if any received sequences have
 * timed out.
 **/
static void
lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *dmabuf = NULL;

	/* get the oldest sequence on the rcv list */
	h_buf = list_get_first(&vport->rcv_buffer_list,
			       struct lpfc_dmabuf, list);
	if (!h_buf)
		return;
	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
}

/**
 * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function cleans up all outstanding received sequences. This is called
 * by the driver when a link event or user action invalidates all the received
 * sequences.
 **/
void
lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;

	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
}

/**
 * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
 * @vport: The vport that the received sequences were sent to.
 *
 * This function determines whether any received sequences have timed out by
 * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
 * indicates that there is at least one timed out sequence this routine will
 * go through the received sequences one at a time from most inactive to most
 * active to determine which ones need to be cleaned up. Once it has determined
 * that a sequence needs to be cleaned up it will simply free up the resources
 * without sending an abort.
 **/
void
lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
{
	struct lpfc_dmabuf *h_buf, *hnext;
	struct lpfc_dmabuf *d_buf, *dnext;
	struct hbq_dmabuf *dmabuf = NULL;
	unsigned long timeout;
	int abort_count = 0;

	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
		   vport->rcv_buffer_time_stamp);
	if (list_empty(&vport->rcv_buffer_list) ||
	    time_before(jiffies, timeout))
		return;
	/* start with the oldest sequence on the rcv list */
	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
			   dmabuf->time_stamp);
		if (time_before(jiffies, timeout))
			break;
		abort_count++;
		list_del_init(&dmabuf->hbuf.list);
		list_for_each_entry_safe(d_buf, dnext,
					 &dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
	}
	if (abort_count)
		lpfc_update_rcv_time_stamp(vport);
}

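/*
 * Note (assumption about the call site): lpfc_rcv_seq_check_edtov() is meant
 * to run from a periodic per-vport timer context, so stale partial sequences
 * age out roughly one E_D_TOV after their newest frame, e.g.:
 *
 *	// from a vport timeout handler; the exact caller is an assumption
 *	lpfc_rcv_seq_check_edtov(vport);
 */
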
/**
 * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
 * @vport: The vport the frame was received on.
 * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
 *
 * This function searches through the existing incomplete sequences that have
 * been sent to this @vport. If the frame matches one of the incomplete
 * sequences then the dbuf in the @dmabuf is added to the list of frames that
 * make up that sequence. If no sequence is found that matches this frame then
 * the function will add the hbuf in the @dmabuf to the @vport's
 * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
 * sequence list that the frame was linked to.
 **/
static struct hbq_dmabuf *
lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf;
	struct lpfc_dmabuf *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;
	struct hbq_dmabuf *temp_dmabuf = NULL;

	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	dmabuf->time_stamp = jiffies;
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* Use the hdr_buf to find the sequence that this frame belongs to */
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}
	if (!seq_dmabuf) {
		/*
		 * This indicates first frame received for this sequence.
		 * Queue the buffer on the vport's rcv_buffer_list.
		 */
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	temp_hdr = seq_dmabuf->hbuf.virt;
	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
		list_del_init(&seq_dmabuf->hbuf.list);
		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		lpfc_update_rcv_time_stamp(vport);
		return dmabuf;
	}
	/* move this sequence to the tail to indicate a young sequence */
	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
	seq_dmabuf->time_stamp = jiffies;
	lpfc_update_rcv_time_stamp(vport);
	if (list_empty(&seq_dmabuf->dbuf.list)) {
		temp_hdr = dmabuf->hbuf.virt;
		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
		return seq_dmabuf;
	}
	/* find the correct place in the sequence to insert this frame */
	list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
		/*
		 * If the frame's sequence count is greater than the frame on
		 * the list then insert the frame right after this frame
		 */
		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
			return seq_dmabuf;
		}
	}
	return NULL;
}

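/*
 * Sketch of the reassembly flow these helpers implement (it mirrors the
 * logic in lpfc_sli4_handle_received_buffer() further below):
 *
 *	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);	 // link the frame
 *	if (seq_dmabuf && lpfc_seq_complete(seq_dmabuf)) // no holes, END_SEQ
 *		lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf); // hand to ULP
 */
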
/**
 * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
 * @vport: pointer to a virtual port
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function tries to abort the partially assembled sequence described by
 * the information from the basic abort @dmabuf. It checks whether such a
 * partially assembled sequence is held by the driver. If so, it frees up all
 * the frames from the partially assembled sequence.
 *
 * Return
 * true  -- if there is a matching partially assembled sequence present and
 *          all the frames were freed with the sequence;
 * false -- if there is no matching partially assembled sequence present so
 *          nothing got aborted in the lower layer driver
 **/
static bool
lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
			    struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *new_hdr;
	struct fc_frame_header *temp_hdr;
	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
	struct hbq_dmabuf *seq_dmabuf = NULL;

	/* Use the hdr_buf to find the sequence that matches this frame */
	INIT_LIST_HEAD(&dmabuf->dbuf.list);
	INIT_LIST_HEAD(&dmabuf->hbuf.list);
	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
		temp_hdr = (struct fc_frame_header *)h_buf->virt;
		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
			continue;
		/* found a pending sequence that matches this frame */
		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
		break;
	}

	/* Free up all the frames from the partially assembled sequence */
	if (seq_dmabuf) {
		list_for_each_entry_safe(d_buf, n_buf,
					 &seq_dmabuf->dbuf.list, list) {
			list_del_init(&d_buf->list);
			lpfc_in_buf_free(vport->phba, d_buf);
		}
		return true;
	}
	return false;
}

/**
 * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
 * @phba: Pointer to HBA context object.
 * @cmd_iocbq: pointer to the command iocbq structure.
 * @rsp_iocbq: pointer to the response iocbq structure.
 *
 * This function handles the sequence abort response iocb command complete
 * event. It properly releases the memory allocated to the sequence abort
 * accept iocb.
 **/
static void
lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
			     struct lpfc_iocbq *cmd_iocbq,
			     struct lpfc_iocbq *rsp_iocbq)
{
	if (cmd_iocbq)
		lpfc_sli_release_iocbq(phba, cmd_iocbq);
}

/**
 * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
 * @phba: Pointer to HBA context object.
 * @fc_hdr: pointer to a FC frame header.
 *
 * This function sends a basic response to a previous unsol sequence abort
 * event after aborting the sequence handling.
 **/
static void
lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
			struct fc_frame_header *fc_hdr)
{
	struct lpfc_iocbq *ctiocb = NULL;
	struct lpfc_nodelist *ndlp;
	uint16_t oxid, rxid;
	uint32_t sid, fctl;
	IOCB_t *icmd;
	int rc;

	if (!lpfc_is_link_up(phba))
		return;

	sid = sli4_sid_from_fc_hdr(fc_hdr);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr->fh_rx_id);

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				"1268 Find ndlp returned NULL for oxid:x%x "
				"SID:x%x\n", oxid, sid);
		return;
	}
	if (rxid >= phba->sli4_hba.max_cfg_param.xri_base
		&& rxid <= (phba->sli4_hba.max_cfg_param.max_xri
		+ phba->sli4_hba.max_cfg_param.xri_base))
		lpfc_set_rrq_active(phba, ndlp, rxid, oxid, 0);

	/* Allocate buffer for rsp iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb)
		return;

	/* Extract the F_CTL field from FC_HDR */
	fctl = sli4_fctl_from_fc_hdr(fc_hdr);

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.bdeSize = 0;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_ACC;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_BLS;

	/* Fill in the rest of iocb fields */
	icmd->ulpCommand = CMD_XMIT_BLS_RSP64_CX;
	icmd->ulpBdeCount = 0;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	icmd->ulpContext = ndlp->nlp_rpi;
	ctiocb->context1 = ndlp;

	ctiocb->iocb_cmpl = NULL;
	ctiocb->vport = phba->pport;
	ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
	ctiocb->sli4_xritag = NO_XRI;

	/* If the oxid maps to the FCP XRI range or if it is out of range,
	 * send a BLS_RJT. The driver no longer has that exchange.
	 * Override the IOCB for a BA_RJT.
	 */
	if (oxid > (phba->sli4_hba.max_cfg_param.max_xri +
		    phba->sli4_hba.max_cfg_param.xri_base) ||
	    oxid > (lpfc_sli4_get_els_iocb_cnt(phba) +
		    phba->sli4_hba.max_cfg_param.xri_base)) {
		icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
		bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
		bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
		bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
	}

	if (fctl & FC_FC_EX_CTX) {
		/* ABTS sent by responder to CT exchange, construction
		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
		 * field and RX_ID from ABTS for RX_ID field.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_RSP);
		bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, rxid);
	} else {
		/* ABTS sent by initiator to CT exchange, construction
		 * of BA_ACC will need to allocate a new XRI as for the
		 * XRI_TAG and RX_ID fields.
		 */
		bf_set(lpfc_abts_orig, &icmd->un.bls_rsp, LPFC_ABTS_UNSOL_INT);
		bf_set(lpfc_abts_rxid, &icmd->un.bls_rsp, NO_XRI);
	}
	bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);

	/* Xmit CT abts response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
			"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
			icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
				"2925 Failed to issue CT ABTS RSP x%x on "
				"xri x%x, Data x%x\n",
				icmd->un.xseq64.w5.hcsw.Rctl, oxid,
				phba->link_state);
		lpfc_sli_release_iocbq(phba, ctiocb);
	}
}

/**
 * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
 * @vport: Pointer to the vport on which this sequence was received
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an SLI-4 unsolicited abort event. If the unsolicited
 * receive sequence is only partially assembled by the driver, it shall abort
 * the partially assembled frames for the sequence. Otherwise, if the
 * unsolicited receive sequence has been completely assembled and passed to
 * the Upper Layer Protocol (ULP), it then marks the per oxid status to note
 * the unsolicited sequence has been aborted. After that, it will issue a
 * basic accept to accept the abort.
 **/
static void
lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
			     struct hbq_dmabuf *dmabuf)
{
	struct lpfc_hba *phba = vport->phba;
	struct fc_frame_header fc_hdr;
	uint32_t fctl;
	bool abts_par;

	/* Make a copy of fc_hdr before the dmabuf being released */
	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);

	if (fctl & FC_FC_EX_CTX) {
		/*
		 * ABTS sent by responder to exchange, just free the buffer
		 */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
	} else {
		/*
		 * ABTS sent by initiator to exchange, need to do cleanup
		 */
		/* Try to abort partially assembled seq */
		abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);

		/* Send abort to ULP if partially seq abort failed */
		if (abts_par == false)
			lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
		else
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
	}
	/* Send basic accept (BA_ACC) to the abort requester */
	lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
}

/**
 * lpfc_seq_complete - Indicates if a sequence is complete
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function checks the sequence, starting with the frame described by
 * @dmabuf, to see if all the frames associated with this sequence are present.
 * The frames associated with this sequence are linked to the @dmabuf using the
 * dbuf list. This function looks for three major things. 1) That the first
 * frame has a sequence count of zero. 2) There is a frame with the last frame
 * of sequence bit set. 3) That there are no holes in the sequence count. The
 * function will return 1 when the sequence is complete, otherwise it will
 * return 0.
 **/
static int
lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header *hdr;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *seq_dmabuf;
	uint32_t fctl;
	int seq_count = 0;

	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* make sure first frame of sequence has a sequence count of zero */
	if (hdr->fh_seq_cnt != seq_count)
		return 0;
	fctl = (hdr->fh_f_ctl[0] << 16 |
		hdr->fh_f_ctl[1] << 8 |
		hdr->fh_f_ctl[2]);
	/* If last frame of sequence we can return success. */
	if (fctl & FC_FC_END_SEQ)
		return 1;
	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
		/* If there is a hole in the sequence count then fail. */
		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
			return 0;
		fctl = (hdr->fh_f_ctl[0] << 16 |
			hdr->fh_f_ctl[1] << 8 |
			hdr->fh_f_ctl[2]);
		/* If last frame of sequence we can return success. */
		if (fctl & FC_FC_END_SEQ)
			return 1;
	}
	return 0;
}

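/*
 * Worked example (illustrative): for a three-frame sequence with SEQ_CNT
 * 0, 1, 2 and F_CTL END_SEQ set only on the last frame, the walk above sees
 * ++seq_count match 1 and then 2, hits FC_FC_END_SEQ on frame 2, and returns
 * 1. A received pattern of 0, 2 fails the ++seq_count == fh_seq_cnt test and
 * returns 0, so the caller keeps waiting for the missing frame.
 */
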
/**
 * lpfc_prep_seq - Prep sequence for ULP processing
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function takes a sequence, described by a list of frames, and creates
 * a list of iocbq structures to describe the sequence. This iocbq list will be
 * used to issue to the generic unsolicited sequence handler. This routine
 * returns a pointer to the first iocbq in the list. If the function is unable
 * to allocate an iocbq then it throws out the received frames that were not
 * able to be described and returns a pointer to the first iocbq. If unable to
 * allocate any iocbqs (including the first) this function will return NULL.
 **/
static struct lpfc_iocbq *
lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
{
	struct lpfc_dmabuf *d_buf, *n_buf;
	struct lpfc_iocbq *first_iocbq, *iocbq;
	struct fc_frame_header *fc_hdr;
	uint32_t sid;
	struct ulp_bde64 *pbde;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	/* remove from receive buffer list */
	list_del_init(&seq_dmabuf->hbuf.list);
	lpfc_update_rcv_time_stamp(vport);
	/* get the Remote Port's SID */
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	/* Get an iocbq struct to fill in. */
	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
	if (first_iocbq) {
		/* Initialize the first IOCB. */
		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
		first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
		first_iocbq->iocb.unsli3.rcvsli3.vpi =
					vport->vpi + vport->phba->vpi_base;
		/* put the first buffer into the first IOCBq */
		first_iocbq->context2 = &seq_dmabuf->dbuf;
		first_iocbq->context3 = NULL;
		first_iocbq->iocb.ulpBdeCount = 1;
		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
		first_iocbq->iocb.un.rcvels.remoteID = sid;
		first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length,
				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
	}
	iocbq = first_iocbq;
	/*
	 * Each IOCBq can have two Buffers assigned, so go through the list
	 * of buffers for this sequence and save two buffers in each IOCBq
	 */
	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
		if (!iocbq) {
			lpfc_in_buf_free(vport->phba, d_buf);
			continue;
		}
		if (!iocbq->context3) {
			iocbq->context3 = d_buf;
			iocbq->iocb.ulpBdeCount++;
			pbde = (struct ulp_bde64 *)
					&iocbq->iocb.unsli3.sli3Words[4];
			pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length,
				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
		} else {
			iocbq = lpfc_sli_get_iocbq(vport->phba);
			if (!iocbq) {
				if (first_iocbq) {
					first_iocbq->iocb.ulpStatus =
							IOSTAT_FCP_RSP_ERROR;
					first_iocbq->iocb.un.ulpWord[4] =
							IOERR_NO_RESOURCES;
				}
				lpfc_in_buf_free(vport->phba, d_buf);
				continue;
			}
			iocbq->context2 = d_buf;
			iocbq->context3 = NULL;
			iocbq->iocb.ulpBdeCount = 1;
			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
							LPFC_DATA_BUF_SIZE;
			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
				bf_get(lpfc_rcqe_length,
				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
			iocbq->iocb.un.rcvels.remoteID = sid;
			list_add_tail(&iocbq->list, &first_iocbq->list);
		}
	}
	return first_iocbq;
}

static void
lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
			  struct hbq_dmabuf *seq_dmabuf)
{
	struct fc_frame_header *fc_hdr;
	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
	struct lpfc_hba *phba = vport->phba;

	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
	if (!iocbq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2707 Ring %d handler: Failed to allocate "
				"iocb Rctl x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
		return;
	}
	if (!lpfc_complete_unsol_iocb(phba,
				      &phba->sli.ring[LPFC_ELS_RING],
				      iocbq, fc_hdr->fh_r_ctl,
				      fc_hdr->fh_type))
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"2540 Ring %d handler: unexpected Rctl "
				"x%x Type x%x received\n",
				LPFC_ELS_RING,
				fc_hdr->fh_r_ctl, fc_hdr->fh_type);

	/* Free iocb created in lpfc_prep_seq */
	list_for_each_entry_safe(curr_iocb, next_iocb,
				 &iocbq->list, list) {
		list_del_init(&curr_iocb->list);
		lpfc_sli_release_iocbq(phba, curr_iocb);
	}
	lpfc_sli_release_iocbq(phba, iocbq);
}

/**
 * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
 * @phba: Pointer to HBA context object.
 * @dmabuf: Pointer to the received dma buffer.
 *
 * This function is called with no lock held. This function processes all
 * the received buffers and gives it to upper layers when a received buffer
 * indicates that it is the final frame in the sequence. The interrupt
 * service routine processes received buffers at interrupt contexts and adds
 * received dma buffers to the rb_pend_list queue and signals the worker thread.
 * Worker thread calls lpfc_sli4_handle_received_buffer, which will call the
 * appropriate receive function when the final frame in a sequence is received.
 **/
void
lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
				 struct hbq_dmabuf *dmabuf)
{
	struct hbq_dmabuf *seq_dmabuf;
	struct fc_frame_header *fc_hdr;
	struct lpfc_vport *vport;
	uint32_t fcfi;

	/* Process each received buffer */
	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
	/* check to see if this a valid type of frame */
	if (lpfc_fc_frame_check(phba, fc_hdr)) {
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl);
	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
	if (!vport || !(vport->vpi_state & LPFC_VPI_REGISTERED)) {
		/* throw out the frame */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* Handle the basic abort sequence (BA_ABTS) event */
	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
		return;
	}

	/* Link this frame */
	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
	if (!seq_dmabuf) {
		/* unable to add frame to vport - throw it out */
		lpfc_in_buf_free(phba, &dmabuf->dbuf);
		return;
	}
	/* If not last frame in sequence continue processing frames. */
	if (!lpfc_seq_complete(seq_dmabuf))
		return;

	/* Send the complete sequence to the upper layer protocol */
	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}

/**
 * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * HBA consistent with the SLI-4 interface spec. This routine
 * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
 * SLI4_PAGE_SIZE modulo 64 rpi context headers.
 *
 * This routine does not require any locks. Its usage is expected
 * to be driver load or reset recovery when the driver is
 * sequential.
 *
 * Return codes
 * 	0 - successful
 *      -EIO - The mailbox failed to complete successfully.
 * 	When this error occurs, the driver is not guaranteed
 *	to have any rpi regions posted to the device and
 *	must either attempt to repost the regions or take a
 *	fatal error.
 **/
int
lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_page;
	uint32_t rc = 0;

	/* Post all rpi memory regions to the port. */
	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2008 Error %d posting all rpi "
					"headers\n", rc);
			rc = -EIO;
			break;
		}
	}

	return rc;
}

/**
 * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 * @rpi_page: pointer to the rpi memory region.
 *
 * This routine is invoked to post a single rpi header to the
 * HBA consistent with the SLI-4 interface spec. This memory region
 * maps up to 64 rpi context regions.
 *
 * Return codes
 * 	0 - successful
 * 	-ENOMEM - No available memory
 *      -EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
	uint32_t rc = 0;
	uint32_t mbox_tmo;
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;

	/* The port is notified of the header region via a mailbox command. */
	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2001 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	/* Post all rpi memory regions to the port. */
	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
			 sizeof(struct lpfc_sli4_cfg_mhdr),
			 LPFC_SLI4_MBX_EMBED);
	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
	       hdr_tmpl, rpi_page->page_count);
	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
	       rpi_page->start_rpi);
	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2514 POST_RPI_HDR mailbox failed with "
				"status x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
	}
	return rc;
}


/**
 * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the next available rpi from the
 * driver's rpi bitmask.  If the pool of remaining rpis drops below the
 * low water mark, another rpi header page is allocated and posted to
 * the port.
 *
 * Return codes
 *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
 *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
 **/
int
lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
{
	int rpi;
	uint16_t max_rpi, rpi_base, rpi_limit;
	uint16_t rpi_remaining;
	struct lpfc_rpi_hdr *rpi_hdr;

	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
	rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
	rpi_limit = phba->sli4_hba.next_rpi;

	/*
	 * The valid rpi range is not guaranteed to be zero-based.  Start
	 * the search at the rpi_base as reported by the port.
	 */
	spin_lock_irq(&phba->hbalock);
	rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
	if (rpi >= rpi_limit || rpi < rpi_base)
		rpi = LPFC_RPI_ALLOC_ERROR;
	else {
		set_bit(rpi, phba->sli4_hba.rpi_bmask);
		phba->sli4_hba.max_cfg_param.rpi_used++;
		phba->sli4_hba.rpi_count++;
	}

	/*
	 * Don't try to allocate more rpi header regions if the device limit
	 * on available rpis max has been exhausted.
	 */
	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
	    (phba->sli4_hba.rpi_count >= max_rpi)) {
		spin_unlock_irq(&phba->hbalock);
		return rpi;
	}

	/*
	 * If the driver is running low on rpi resources, allocate another
	 * page now.  Note that the next_rpi value is used because
	 * it represents how many are actually in use whereas max_rpi notes
	 * how many are supported max by the device.
	 */
	rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
		phba->sli4_hba.rpi_count;
	spin_unlock_irq(&phba->hbalock);
	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
		if (!rpi_hdr) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2002 Error Could not grow rpi "
					"count\n");
		} else {
			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
		}
	}

	return rpi;
}
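
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * needing an rpi for a remote port login checks for pool exhaustion and
 * returns the rpi through lpfc_sli4_free_rpi() when the login is torn
 * down:
 *
 *	rpi = lpfc_sli4_alloc_rpi(phba);
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		return -ENOSPC;
 *	(use the rpi for the REG_LOGIN mailbox command, then later:)
 *	lpfc_sli4_free_rpi(phba, rpi);
 */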

/**
 * __lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.  The caller is expected
 * to hold the hbalock; lpfc_sli4_free_rpi() is the locking wrapper.
 **/
void
__lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
		phba->sli4_hba.rpi_count--;
		phba->sli4_hba.max_cfg_param.rpi_used--;
	}
}

/**
 * lpfc_sli4_free_rpi - Release an rpi for reuse.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release an rpi to the pool of
 * available rpis maintained by the driver.
 **/
void
lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_sli4_free_rpi(phba, rpi);
	spin_unlock_irq(&phba->hbalock);
}

/**
 * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the memory region that provided
 * rpis via a bitmask.
 **/
void
lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.rpi_bmask);
}

/**
 * lpfc_sli4_resume_rpi - Resume an rpi with the port
 * @ndlp: pointer to the node whose rpi is being resumed.
 *
 * This routine is invoked to ask the port to resume processing of
 * exchanges for a previously registered rpi.  The mailbox command is
 * issued asynchronously, so only submission failures are reported.
 **/
int
lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
{
	LPFC_MBOXQ_t *mboxq;
	struct lpfc_hba *phba = ndlp->phba;
	int rc;

	/* The port is notified of the rpi resume via a mailbox command. */
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	/* Construct and issue the RESUME_RPI mailbox command. */
	lpfc_resume_rpi(mboxq, ndlp);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2010 Resume RPI Mailbox failed "
				"status %d, mbxStatus x%x\n", rc,
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
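
/*
 * Usage sketch (illustrative only, not part of the driver): discovery
 * code on an SLI4 port can resume a recovered node's registered rpi;
 * because the mailbox is issued with MBX_NOWAIT, only submission
 * failures are reported to the caller:
 *
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		lpfc_sli4_resume_rpi(ndlp);
 */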

/**
 * lpfc_sli4_init_vpi - Initialize a vpi with the port
 * @vport: Pointer to the vport for which the vpi is being initialized
 *
 * This routine is invoked to activate a vpi with the port.
 *
 * Returns:
 *    0 success
 *    -Evalue otherwise
 **/
int
lpfc_sli4_init_vpi(struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mboxq;
	int rc = 0;
	int retval = MBX_SUCCESS;
	uint32_t mbox_tmo;
	struct lpfc_hba *phba = vport->phba;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_init_vpi(phba, mboxq, vport->vpi);
	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
				 "2022 INIT VPI Mailbox failed "
				 "status %d, mbxStatus x%x\n", rc,
				 bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		retval = -EIO;
	}
	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, vport->phba->mbox_mem_pool);

	return retval;
}
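
/*
 * Usage sketch (illustrative only, not part of the driver): a new
 * vport's vpi must be initialized with the port before FDISC can be
 * sent; a failure here is treated as a discovery error.  The
 * FC_VPORT_FAILED state used below is the generic FC transport state,
 * an assumption about the caller's error handling:
 *
 *	rc = lpfc_sli4_init_vpi(vport);
 *	if (rc)
 *		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 */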

/**
 * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: Pointer to mailbox object.
 *
 * This routine is the completion handler for the ADD_FCF_RECORD mailbox
 * command issued by lpfc_sli4_add_fcf_record() below.  It checks the
 * returned status and takes care of freeing the nonembedded mailbox
 * resources.
 **/
void
lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	void *virt_addr;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;

	virt_addr = mboxq->sge_array->addr[0];
	/* The IOCTL status is embedded in the mailbox subheader. */
	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);

	if ((shdr_status || shdr_add_status) &&
	    (shdr_status != STATUS_FCF_IN_USE))
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2558 ADD_FCF_RECORD mailbox failed with "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);

	lpfc_sli4_mbox_cmd_free(phba, mboxq);
}

/**
 * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the initialized fcf record to add.
 *
 * This routine is invoked to manually add a single FCF record.  The caller
 * must pass a completely initialized FCF_Record.  This routine takes
 * care of the nonembedded mailbox operations.
 **/
int
lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
{
	int rc = 0;
	LPFC_MBOXQ_t *mboxq;
	uint8_t *bytep;
	void *virt_addr;
	dma_addr_t phys_addr;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	uint32_t fcfindex;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
		return -ENOMEM;
	}

	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
		  sizeof(uint32_t);

	/* Allocate DMA memory and set up the non-embedded mailbox command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
				     req_len, LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2523 Allocated DMA memory size (x%x) is "
			"less than the requested DMA memory "
			"size (x%x)\n", alloc_len, req_len);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -ENOMEM;
	}

	/*
	 * Get the first SGE entry from the non-embedded DMA memory.  This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	virt_addr = mboxq->sge_array->addr[0];
	/*
	 * Configure the FCF record for FCFI 0.  This is the driver's
	 * hardcoded default and gets used in nonFIP mode.
	 */
	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));

	/*
	 * Copy the fcf_index and the FCF Record Data.  The data starts after
	 * the FCoE header plus word10.  The data copy needs to be endian
	 * correct.
	 */
	bytep += sizeof(uint32_t);
	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2515 ADD_FCF_RECORD mailbox failed with "
			"status 0x%x\n", rc);
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		rc = -EIO;
	} else
		rc = 0;

	return rc;
}

/**
 * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_record: pointer to the fcf record to write the default data.
 * @fcf_index: FCF table entry index.
 *
 * This routine is invoked to build the driver's default FCF record.  The
 * values used are hardcoded.  This routine handles memory initialization.
 **/
void
lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
				struct fcf_record *fcf_record,
				uint16_t fcf_index)
{
	memset(fcf_record, 0, sizeof(struct fcf_record));
	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
	       LPFC_FCF_FPMA | LPFC_FCF_SPMA);
	/* Set the VLAN bit map */
	if (phba->valid_vlan) {
		fcf_record->vlan_bitmap[phba->vlan_id / 8]
			= 1 << (phba->vlan_id % 8);
	}
}
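
/*
 * Usage sketch (illustrative only, not part of the driver): in nonFIP
 * mode the two routines above are used together, building the hardcoded
 * default record and handing it to the port; LPFC_FCOE_FCF_DEF_INDEX is
 * assumed to be the driver's default FCF table index:
 *
 *	fcf_record = kzalloc(sizeof(struct fcf_record), GFP_KERNEL);
 *	if (fcf_record) {
 *		lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
 *						LPFC_FCOE_FCF_DEF_INDEX);
 *		rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
 *		kfree(fcf_record);
 *	}
 */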

/**
 * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to scan the entire FCF table by reading FCF
 * record and processing it one at a time starting from the @fcf_index
 * for initial FCF discovery or fast FCF failover rediscovery.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2000 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_scan;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_scan;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag |= FCF_TS_INPROG;
	spin_unlock_irq(&phba->hbalock);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else {
		/* Reset eligible FCF count for new scan */
		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
			phba->fcf.eligible_fcf_cnt = 0;
		error = 0;
	}
fail_fcf_scan:
	if (error) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		/* FCF scan failed, clear FCF_TS_INPROG flag */
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~FCF_TS_INPROG;
		spin_unlock_irq(&phba->hbalock);
	}
	return error;
}
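
/*
 * Usage sketch (illustrative only, not part of the driver): an initial
 * scan of the FCF table starts from the first record; the completion
 * handler then walks the table one record at a time:
 *
 *	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 *
 * On failure FCF_TS_INPROG has already been cleared, so no scan is left
 * marked in progress.
 */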

/**
 * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index
 * and to use it for FLOGI roundrobin FCF failover.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2763 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_sli4_read_fcf_rec - Read hba fcf record to update eligible fcf bmask.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: FCF table entry offset.
 *
 * This routine is invoked to read an FCF record indicated by @fcf_index to
 * determine whether it's eligible for the FLOGI roundrobin failover list.
 *
 * Return 0 if the mailbox command is submitted successfully, nonzero
 * otherwise.
 **/
int
lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
{
	int rc = 0, error;
	LPFC_MBOXQ_t *mboxq;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
				"2758 Failed to allocate mbox for "
				"READ_FCF cmd\n");
		error = -ENOMEM;
		goto fail_fcf_read;
	}
	/* Construct the read FCF record mailbox command */
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		error = -EINVAL;
		goto fail_fcf_read;
	}
	/* Issue the mailbox command asynchronously */
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		error = -EIO;
	else
		error = 0;

fail_fcf_read:
	if (error && mboxq)
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
	return error;
}

/**
 * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get the next eligible FCF record index in a round
 * robin fashion.  If the next eligible FCF record index equals to the
 * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
 * shall be returned, otherwise, the next eligible FCF record's index
 * shall be returned.
 **/
uint16_t
lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
	uint16_t next_fcf_index;

	/* Search start from next bit of currently registered FCF index */
	next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
					LPFC_SLI4_FCF_TBL_INDX_MAX;
	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
				       LPFC_SLI4_FCF_TBL_INDX_MAX,
				       next_fcf_index);

	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);

	/* Check roundrobin failover list empty condition */
	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
				"2844 No roundrobin failover FCF available\n");
		return LPFC_FCOE_FCF_NEXT_NONE;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2845 Get next roundrobin failover FCF (x%x)\n",
			next_fcf_index);

	return next_fcf_index;
}
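
/*
 * Usage sketch (illustrative only, not part of the driver): the FLOGI
 * roundrobin failover path picks the next eligible index and reads that
 * record, stopping when the eligible list is exhausted:
 *
 *	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		return -ENOENT;
 *	rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 */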

/**
 * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to set.
 *
 * This routine sets the FCF record index in to the eligible bmask for
 * roundrobin failover search.  It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before setting the bit.
 *
 * Returns 0 if the index bit is successfully set, otherwise it returns
 * -EINVAL.
 **/
int
lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2610 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return -EINVAL;
	}
	/* Set the eligible FCF record index bmask */
	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2790 Set FCF (x%x) to roundrobin FCF failover "
			"bmask\n", fcf_index);

	return 0;
}

/**
 * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: the index of the fcf record to clear.
 *
 * This routine clears the FCF record index from the eligible bmask for
 * roundrobin failover search.  It checks to make sure that the index
 * does not go beyond the range of the driver allocated bmask dimension
 * before clearing the bit.
 **/
void
lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
{
	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2762 FCF (x%x) reached driver's book "
				"keeping dimension:x%x\n",
				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
		return;
	}
	/* Clear the eligible FCF record index bmask */
	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);

	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2791 Clear FCF (x%x) from roundrobin failover "
			"bmask\n", fcf_index);
}
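
/*
 * Bookkeeping sketch (illustrative only, not part of the driver): an
 * FCF record found eligible during scan is added to the roundrobin
 * bmask and removed again once it disappears or becomes unusable; the
 * "eligible" flag below is hypothetical:
 *
 *	if (eligible)
 *		rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
 *	else
 *		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
 */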

/**
 * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to mailbox object.
 *
 * This routine is the completion routine for the rediscover FCF table mailbox
 * command.  If the mailbox command returned failure, it will try to stop the
 * FCF rediscover wait timer.
 **/
void
lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	uint32_t shdr_status, shdr_add_status;

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;

	shdr_status = bf_get(lpfc_mbox_hdr_status,
			     &redisc_fcf->header.cfg_shdr.response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
				 &redisc_fcf->header.cfg_shdr.response);
	if (shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
				"2746 Requesting for FCF rediscovery failed "
				"status x%x add_status x%x\n",
				shdr_status, shdr_add_status);
		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * CVL event triggered FCF rediscover request failed,
			 * last resort to re-try current registered FCF entry.
			 */
			lpfc_retry_pport_discovery(phba);
		} else {
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * DEAD FCF event triggered FCF rediscover request
			 * failed, last resort to fail over as a link down
			 * to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		}
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
				"2775 Start FCF rediscover quiescent timer\n");
		/*
		 * Start FCF rediscovery wait timer for pending FCF
		 * before rescan FCF record table.
		 */
		lpfc_fcf_redisc_wait_start_timer(phba);
	}

	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request for rediscovery of the entire FCF table
 * by the port.
 **/
int
lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
	int rc, length;

	/* Cancel retry delay timers to all vports before FCF rediscover */
	lpfc_cancel_all_vport_retry_delay_timer(phba);

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2745 Failed to allocate mbox for "
				"requesting FCF rediscover.\n");
		return -ENOMEM;
	}

	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
			 length, LPFC_SLI4_MBX_EMBED);

	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
	/* Set count to 0 for invalidating the entire FCF database */
	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);

	/* Issue the mailbox command asynchronously */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);

	if (rc == MBX_NOT_FINISHED) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;
}
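
/*
 * Usage sketch (illustrative only, not part of the driver): an FCF DEAD
 * or CVL event handler can request a full table rediscovery and fall
 * back to the failthrough routine below when the request cannot even be
 * issued:
 *
 *	rc = lpfc_sli4_redisc_fcf_table(phba);
 *	if (rc)
 *		lpfc_sli4_fcf_dead_failthrough(phba);
 */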

/**
 * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
 * @phba: pointer to lpfc hba data structure.
 *
 * This function is the failover routine as a last resort to the FCF DEAD
 * event when the driver failed to perform fast FCF failover.
 **/
void
lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
{
	uint32_t link_state;

	/*
	 * Last resort as FCF DEAD event failover will treat this as
	 * a link down, but save the link state because we don't want
	 * it to be changed to Link Down unless it is already down.
	 */
	link_state = phba->link_state;
	lpfc_linkdown(phba);
	phba->link_state = link_state;

	/* Unregister FCF if no devices connected to it */
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads region 23 and parses its TLVs for the port status
 * to decide if the user disabled the port.  If a TLV indicates that the
 * port is disabled, the hba_flag is set accordingly.
 **/
void
lpfc_sli_read_link_ste(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb = NULL;
	MAILBOX_t *mb;
	uint8_t *rgn23_data = NULL;
	uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2600 lpfc_sli_read_serdes_param failed to"
				" allocate mailbox memory\n");
		goto out;
	}
	mb = &pmb->u.mb;

	/* Get adapter Region 23 data */
	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
	if (!rgn23_data)
		goto out;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2601 lpfc_sli_read_link_ste failed to"
					" read config region 23 rc 0x%x Status 0x%x\n",
					rc, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/*
		 * dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;

		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      rgn23_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);

	data_size = offset;
	offset = 0;

	if (!data_size)
		goto out;

	/* Check the region signature first */
	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2619 Config region 23 has bad signature\n");
		goto out;
	}
	offset += 4;

	/* Check the data structure version */
	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2620 Config region 23 has bad version\n");
		goto out;
	}
	offset += 4;

	/* Parse TLV entries in the region */
	while (offset < data_size) {
		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
			break;
		/*
		 * If the TLV is not driver specific TLV or driver id is
		 * not linux driver id, skip the record.
		 */
		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
		    (rgn23_data[offset + 3] != 0)) {
			offset += rgn23_data[offset + 1] * 4 + 4;
			continue;
		}

		/* Driver found a driver specific TLV in the config region */
		sub_tlv_len = rgn23_data[offset + 1] * 4;
		offset += 4;
		tlv_offset = 0;

		/*
		 * Search for configured port state sub-TLV.
		 */
		while ((offset < data_size) &&
		       (tlv_offset < sub_tlv_len)) {
			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
				offset += 4;
				tlv_offset += 4;
				break;
			}
			if (rgn23_data[offset] != PORT_STE_TYPE) {
				/* Advance both offsets by this sub-TLV's
				 * length; tlv_offset must be bumped before
				 * offset so both use the length read at the
				 * current record.
				 */
				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
				offset += rgn23_data[offset + 1] * 4 + 4;
				continue;
			}

			/* This HBA contains PORT_STE configured */
			if (!rgn23_data[offset + 2])
				phba->hba_flag |= LINK_DISABLED;

			goto out;
		}
	}
out:
	if (pmb)
		mempool_free(pmb, phba->mbox_mem_pool);
	kfree(rgn23_data);
	return;
}
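
/*
 * Usage sketch (illustrative only, not part of the driver): callers run
 * this once during initialization and then honor the flag it may have
 * set, for example by bringing the port up with the link held down:
 *
 *	lpfc_sli_read_link_ste(phba);
 *	if (phba->hba_flag & LINK_DISABLED)
 *		link_disabled = 1;
 *
 * where "link_disabled" is a hypothetical local used by the init path.
 */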

/**
 * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
 * @vport: pointer to vport data structure.
 *
 * This function iterates through the mailboxq and cleans up all REG_LOGIN
 * and REG_VPI mailbox commands associated with the vport.  This function
 * is called when the driver wants to restart discovery of the vport due to
 * a Clear Virtual Link event.
 **/
void
lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_nodelist *act_mbx_ndlp = NULL;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LIST_HEAD(mbox_cmd_list);
	uint8_t restart_loop;

	/* Clean up internally queued mailbox commands with the vport */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if (mb->vport != vport)
			continue;

		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
		    (mb->u.mb.mbxCommand != MBX_REG_VPI))
			continue;

		list_del(&mb->list);
		list_add_tail(&mb->list, &mbox_cmd_list);
	}
	/* Clean up active mailbox command with the vport */
	mb = phba->sli.mbox_active;
	if (mb && (mb->vport == vport)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
		    (mb->u.mb.mbxCommand == MBX_REG_VPI))
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			act_mbx_ndlp = (struct lpfc_nodelist *)mb->context2;
			/* Put reference count for delayed processing */
			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
			/* Unregister the RPI when mailbox complete */
			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
		}
	}
	/* Cleanup any mailbox completions which are not yet processed */
	do {
		restart_loop = 0;
		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
			/*
			 * If this mailbox is already processed or it is
			 * for another vport ignore it.
			 */
			if ((mb->vport != vport) ||
			    (mb->mbox_flag & LPFC_MBX_IMED_UNREG))
				continue;

			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
			    (mb->u.mb.mbxCommand != MBX_REG_VPI))
				continue;

			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
				ndlp = (struct lpfc_nodelist *)mb->context2;
				/* Unregister the RPI when mailbox complete */
				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
				restart_loop = 1;
				spin_unlock_irq(&phba->hbalock);
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				spin_lock_irq(&phba->hbalock);
				break;
			}
		}
	} while (restart_loop);

	spin_unlock_irq(&phba->hbalock);

	/* Release the cleaned-up mailbox commands */
	while (!list_empty(&mbox_cmd_list)) {
		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			ndlp = (struct lpfc_nodelist *) mb->context2;
			mb->context2 = NULL;
			if (ndlp) {
				spin_lock(shost->host_lock);
				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
				spin_unlock(shost->host_lock);
				lpfc_nlp_put(ndlp);
			}
		}
		mempool_free(mb, phba->mbox_mem_pool);
	}

	/* Release the ndlp with the cleaned-up active mailbox command */
	if (act_mbx_ndlp) {
		spin_lock(shost->host_lock);
		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
		spin_unlock(shost->host_lock);
		lpfc_nlp_put(act_mbx_ndlp);
	}
}
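
/*
 * Usage sketch (illustrative only, not part of the driver): a Clear
 * Virtual Link handler would cancel the vport's queued REG_LOGIN and
 * REG_VPI mailboxes before restarting discovery; the follow-up FDISC
 * shown here is an assumption about the recovery sequence:
 *
 *	lpfc_cleanup_pending_mbox(vport);
 *	lpfc_initial_fdisc(vport);
 */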

/**
 * lpfc_drain_txq - Drain the txq
 * @phba: Pointer to HBA context object.
 *
 * This function attempts to submit the IOCBs on the txq
 * to the adapter.  For SLI4 adapters, the txq contains
 * ELS IOCBs that have been deferred because there
 * are no SGLs.  This congestion can occur with large
 * vport counts during node discovery.
 **/
uint32_t
lpfc_drain_txq(struct lpfc_hba *phba)
{
	LIST_HEAD(completions);
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct lpfc_iocbq *piocbq = NULL;
	unsigned long iflags = 0;
	char *fail_msg = NULL;
	struct lpfc_sglq *sglq;
	union lpfc_wqe wqe;

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (pring->txq_cnt > pring->txq_max)
		pring->txq_max = pring->txq_cnt;
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (pring->txq_cnt) {
		spin_lock_irqsave(&phba->hbalock, iflags);

		fail_msg = NULL;
		piocbq = lpfc_sli_ringtx_get(phba, pring);
		sglq = __lpfc_sli_get_sglq(phba, piocbq);
		if (!sglq) {
			__lpfc_sli_ringtx_put(phba, pring, piocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			break;
		} else if (!piocbq) {
			/* The txq_cnt out of sync.  This should
			 * never happen
			 */
			sglq = __lpfc_clear_active_sglq(phba,
							sglq->sli4_xritag);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2823 txq empty and txq_cnt is %d\n",
					pring->txq_cnt);
			break;
		}

		/* The xri and iocb resources secured,
		 * attempt to issue request
		 */
		piocbq->sli4_xritag = sglq->sli4_xritag;
		if (NO_XRI == lpfc_sli4_bpl2sgl(phba, piocbq, sglq))
			fail_msg = "to convert bpl to sgl";
		else if (lpfc_sli4_iocb2wqe(phba, piocbq, &wqe))
			fail_msg = "to convert iocb to wqe";
		else if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
			fail_msg = " - Wq is full";
		else
			lpfc_sli_ringtxcmpl_put(phba, pring, piocbq);

		if (fail_msg) {
			/* Failed means we can't issue and need to cancel */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2822 IOCB failed %s iotag 0x%x "
					"xri 0x%x\n",
					fail_msg,
					piocbq->iotag, piocbq->sli4_xritag);
			list_add_tail(&piocbq->list, &completions);
		}
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	/* Cancel all the IOCBs that cannot be issued */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return pring->txq_cnt;
}
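
/*
 * Usage sketch (illustrative only, not part of the driver): once SGLs
 * are freed back to the pool (for example when an ELS exchange
 * completes), the deferred ELS IOCBs can be retried; the return value
 * is the txq depth still pending:
 *
 *	if (pring->txq_cnt)
 *		lpfc_drain_txq(phba);
 */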