1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* "Broadcom" refers to Broadcom Limited and/or its subsidiaries. *
6 * Copyright (C) 2009-2016 Emulex. All rights reserved. *
7 * EMULEX and SLI are trademarks of Emulex. *
10 * This program is free software; you can redistribute it and/or *
11 * modify it under the terms of version 2 of the GNU General *
12 * Public License as published by the Free Software Foundation. *
13 * This program is distributed in the hope that it will be useful. *
14 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
15 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
16 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
17 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
18 * TO BE LEGALLY INVALID. See the GNU General Public License for *
19 * more details, a copy of which can be found in the file COPYING *
20 * included with this package. *
21 *******************************************************************/
/* Mailbox-active and XRI exchange-busy wait tuning values
 * (iteration counts / timeouts; units not stated here -- see callers)
 */
#define LPFC_ACTIVE_MBOX_WAIT_CNT 100
#define LPFC_XRI_EXCH_BUSY_WAIT_TMO 10000
#define LPFC_XRI_EXCH_BUSY_WAIT_T1 10
#define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000
/* Replenish RPIs when the free count drops to this level */
#define LPFC_RPI_LOW_WATER_MARK 10
/* Selector flags: perform vs. skip FCF unregistration */
#define LPFC_UNREG_FCF 1
#define LPFC_SKIP_UNREG_FCF 0
32 /* Amount of time in seconds for waiting FCF rediscovery to complete */
33 #define LPFC_FCF_REDISCOVER_WAIT_TMO 2000 /* msec */
35 /* Number of SGL entries can be posted in a 4KB nonembedded mbox command */
36 #define LPFC_NEMBED_MBOX_SGL_CNT 254
38 /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
39 #define LPFC_HBA_IO_CHAN_MIN 0
40 #define LPFC_HBA_IO_CHAN_MAX 32
41 #define LPFC_FCP_IO_CHAN_DEF 4
42 #define LPFC_NVME_IO_CHAN_DEF 0
44 /* Number of channels used for Flash Optimized Fabric (FOF) operations */
46 #define LPFC_FOF_IO_CHAN_NUM 1
49 * Provide the default FCF Record attributes used by the driver
50 * when nonFIP mode is configured and there is no other default
51 * FCF Record attributes.
53 #define LPFC_FCOE_FCF_DEF_INDEX 0
54 #define LPFC_FCOE_FCF_GET_FIRST 0xFFFF
55 #define LPFC_FCOE_FCF_NEXT_NONE 0xFFFF
57 #define LPFC_FCOE_NULL_VID 0xFFF
58 #define LPFC_FCOE_IGNORE_VID 0xFFFF
60 /* First 3 bytes of default FCF MAC is specified by FC_MAP */
61 #define LPFC_FCOE_FCF_MAC3 0xFF
62 #define LPFC_FCOE_FCF_MAC4 0xFF
63 #define LPFC_FCOE_FCF_MAC5 0xFE
64 #define LPFC_FCOE_FCF_MAP0 0x0E
65 #define LPFC_FCOE_FCF_MAP1 0xFC
66 #define LPFC_FCOE_FCF_MAP2 0x00
67 #define LPFC_FCOE_MAX_RCV_SIZE 0x800
68 #define LPFC_FCOE_FKA_ADV_PER 0
69 #define LPFC_FCOE_FIP_PRIORITY 0x80
/*
 * Extract the 24-bit FC source ID (S_ID) from a frame header.
 * The original macro was truncated: it ended in a dangling line
 * continuation with unbalanced parentheses and dropped the low byte.
 * Restored to mirror sli4_fctl_from_fc_hdr below.
 */
#define sli4_sid_from_fc_hdr(fc_hdr) \
	((fc_hdr)->fh_s_id[0] << 16 | \
	 (fc_hdr)->fh_s_id[1] << 8  | \
	 (fc_hdr)->fh_s_id[2])
/*
 * Extract the 24-bit FC destination ID (D_ID) from a frame header.
 * The original macro was truncated: it ended in a dangling line
 * continuation with unbalanced parentheses and dropped the low byte.
 * Restored to mirror sli4_fctl_from_fc_hdr below.
 */
#define sli4_did_from_fc_hdr(fc_hdr) \
	((fc_hdr)->fh_d_id[0] << 16 | \
	 (fc_hdr)->fh_d_id[1] << 8  | \
	 (fc_hdr)->fh_d_id[2])
/* Assemble the 24-bit frame control (F_CTL) field from its three bytes */
#define sli4_fctl_from_fc_hdr(fc_hdr)        \
	(((fc_hdr)->fh_f_ctl[2])         |   \
	 ((fc_hdr)->fh_f_ctl[1] << 8)    |   \
	 ((fc_hdr)->fh_f_ctl[0] << 16))
/*
 * Extract the FC-4 type byte from a frame header.
 * The original macro body was dropped, leaving a dangling line
 * continuation that would have swallowed the next #define; restored
 * as a single-byte accessor of the fh_type field.
 */
#define sli4_type_from_fc_hdr(fc_hdr) \
	((fc_hdr)->fh_type)
89 #define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
91 #define INT_FW_UPGRADE 0
92 #define RUN_FW_UPGRADE 1
94 enum lpfc_sli4_queue_type
{
106 /* The queue sub-type defines the functional purpose of the queue */
107 enum lpfc_sli4_queue_subtype
{
120 struct lpfc_eqe
*eqe
;
121 struct lpfc_cqe
*cqe
;
122 struct lpfc_mcqe
*mcqe
;
123 struct lpfc_wcqe_complete
*wcqe_complete
;
124 struct lpfc_wcqe_release
*wcqe_release
;
125 struct sli4_wcqe_xri_aborted
*wcqe_xri_aborted
;
126 struct lpfc_rcqe_complete
*rcqe_complete
;
127 struct lpfc_mqe
*mqe
;
129 union lpfc_wqe128
*wqe128
;
130 struct lpfc_rqe
*rqe
;
135 uint16_t entry_count
; /* Current number of RQ slots */
136 uint16_t buffer_count
; /* Current number of buffers posted */
137 struct list_head rqb_buffer_list
; /* buffers assigned to this HBQ */
138 /* Callback for HBQ buffer allocation */
139 struct rqb_dmabuf
*(*rqb_alloc_buffer
)(struct lpfc_hba
*);
140 /* Callback for HBQ buffer free */
141 void (*rqb_free_buffer
)(struct lpfc_hba
*,
142 struct rqb_dmabuf
*);
146 struct list_head list
;
147 struct list_head wq_list
;
148 struct list_head wqfull_list
;
149 enum lpfc_sli4_queue_type type
;
150 enum lpfc_sli4_queue_subtype subtype
;
151 struct lpfc_hba
*phba
;
152 struct list_head child_list
;
153 struct list_head page_list
;
154 struct list_head sgl_list
;
155 uint32_t entry_count
; /* Number of entries to support on the queue */
156 uint32_t entry_size
; /* Size of each queue entry. */
157 uint32_t entry_repost
; /* Count of entries before doorbell is rung */
158 #define LPFC_EQ_REPOST 8
159 #define LPFC_MQ_REPOST 8
160 #define LPFC_CQ_REPOST 64
161 #define LPFC_RQ_REPOST 64
162 #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 /* For WQs */
163 uint32_t queue_id
; /* Queue ID assigned by the hardware */
164 uint32_t assoc_qid
; /* Queue ID associated with, for CQ/WQ/MQ */
165 uint32_t host_index
; /* The host's index for putting or getting */
166 uint32_t hba_index
; /* The last known hba index for get or put */
168 struct lpfc_sli_ring
*pring
; /* ptr to io ring associated with q */
169 struct lpfc_rqb
*rqbp
; /* ptr to RQ buffers */
172 uint16_t page_count
; /* Number of pages allocated for this queue */
173 uint16_t page_size
; /* size of page allocated for this queue */
174 #define LPFC_EXPANDED_PAGE_SIZE 16384
175 #define LPFC_DEFAULT_PAGE_SIZE 4096
176 uint16_t chann
; /* IO channel this queue is associated with */
178 #define LPFC_DB_RING_FORMAT 0x01
179 #define LPFC_DB_LIST_FORMAT 0x02
181 #define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */
182 void __iomem
*db_regaddr
;
185 void __iomem
*dpp_regaddr
;
192 /* defines for EQ stats */
193 #define EQ_max_eqe q_cnt_1
194 #define EQ_no_entry q_cnt_2
195 #define EQ_cqe_cnt q_cnt_3
196 #define EQ_processed q_cnt_4
198 /* defines for CQ stats */
199 #define CQ_mbox q_cnt_1
200 #define CQ_max_cqe q_cnt_1
201 #define CQ_release_wqe q_cnt_2
202 #define CQ_xri_aborted q_cnt_3
203 #define CQ_wq q_cnt_4
205 /* defines for WQ stats */
206 #define WQ_overflow q_cnt_1
207 #define WQ_posted q_cnt_4
209 /* defines for RQ stats */
210 #define RQ_no_posted_buf q_cnt_1
211 #define RQ_no_buf_found q_cnt_2
212 #define RQ_buf_posted q_cnt_3
213 #define RQ_rcv_buf q_cnt_4
215 struct work_struct irqwork
;
216 struct work_struct spwork
;
218 uint64_t isr_timestamp
;
220 struct lpfc_queue
*assoc_qp
;
221 union sli4_qe qe
[1]; /* array to index entries (must be last) */
224 struct lpfc_sli4_link
{
231 uint16_t logical_speed
;
235 struct lpfc_fcf_rec
{
236 uint8_t fabric_name
[8];
237 uint8_t switch_name
[8];
244 #define BOOT_ENABLE 0x01
245 #define RECORD_VALID 0x02
248 struct lpfc_fcf_pri_rec
{
250 #define LPFC_FCF_ON_PRI_LIST 0x0001
251 #define LPFC_FCF_FLOGI_FAILED 0x0002
256 struct lpfc_fcf_pri
{
257 struct list_head list
;
258 struct lpfc_fcf_pri_rec fcf_rec
;
262 * Maximum FCF table index, it is for driver internal book keeping, it
263 * just needs to be no less than the supported HBA's FCF table size.
265 #define LPFC_SLI4_FCF_TBL_INDX_MAX 32
270 #define FCF_AVAILABLE 0x01 /* FCF available for discovery */
271 #define FCF_REGISTERED 0x02 /* FCF registered with FW */
272 #define FCF_SCAN_DONE 0x04 /* FCF table scan done */
273 #define FCF_IN_USE 0x08 /* Atleast one discovery completed */
274 #define FCF_INIT_DISC 0x10 /* Initial FCF discovery */
275 #define FCF_DEAD_DISC 0x20 /* FCF DEAD fast FCF failover discovery */
276 #define FCF_ACVL_DISC 0x40 /* All CVL fast FCF failover discovery */
277 #define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
278 #define FCF_REDISC_PEND 0x80 /* FCF rediscovery pending */
279 #define FCF_REDISC_EVT 0x100 /* FCF rediscovery event to worker thread */
280 #define FCF_REDISC_FOV 0x200 /* Post FCF rediscovery fast failover */
281 #define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
283 uint32_t eligible_fcf_cnt
;
284 struct lpfc_fcf_rec current_rec
;
285 struct lpfc_fcf_rec failover_rec
;
286 struct list_head fcf_pri_list
;
287 struct lpfc_fcf_pri fcf_pri
[LPFC_SLI4_FCF_TBL_INDX_MAX
];
288 uint32_t current_fcf_scan_pri
;
289 struct timer_list redisc_wait
;
290 unsigned long *fcf_rr_bmask
; /* Eligible FCF indexes for RR failover */
294 #define LPFC_REGION23_SIGNATURE "RG23"
295 #define LPFC_REGION23_VERSION 1
296 #define LPFC_REGION23_LAST_REC 0xff
297 #define DRIVER_SPECIFIC_TYPE 0xA2
298 #define LINUX_DRIVER_ID 0x20
299 #define PORT_STE_TYPE 0x1
301 struct lpfc_fip_param_hdr
{
303 #define FCOE_PARAM_TYPE 0xA0
305 #define FCOE_PARAM_LENGTH 2
306 uint8_t parm_version
;
307 #define FIPP_VERSION 0x01
309 #define lpfc_fip_param_hdr_fipp_mode_SHIFT 6
310 #define lpfc_fip_param_hdr_fipp_mode_MASK 0x3
311 #define lpfc_fip_param_hdr_fipp_mode_WORD parm_flags
312 #define FIPP_MODE_ON 0x1
313 #define FIPP_MODE_OFF 0x0
314 #define FIPP_VLAN_VALID 0x1
317 struct lpfc_fcoe_params
{
324 struct lpfc_fcf_conn_hdr
{
326 #define FCOE_CONN_TBL_TYPE 0xA1
327 uint8_t length
; /* words */
331 struct lpfc_fcf_conn_rec
{
333 #define FCFCNCT_VALID 0x0001
334 #define FCFCNCT_BOOT 0x0002
335 #define FCFCNCT_PRIMARY 0x0004 /* if not set, Secondary */
336 #define FCFCNCT_FBNM_VALID 0x0008
337 #define FCFCNCT_SWNM_VALID 0x0010
338 #define FCFCNCT_VLAN_VALID 0x0020
339 #define FCFCNCT_AM_VALID 0x0040
340 #define FCFCNCT_AM_PREFERRED 0x0080 /* if not set, AM Required */
341 #define FCFCNCT_AM_SPMA 0x0100 /* if not set, FPMA */
344 uint8_t fabric_name
[8];
345 uint8_t switch_name
[8];
348 struct lpfc_fcf_conn_entry
{
349 struct list_head list
;
350 struct lpfc_fcf_conn_rec conn_rec
;
354 * Define the host's bootstrap mailbox. This structure contains
355 * the member attributes needed to create, use, and destroy the
356 * bootstrap mailbox region.
358 * The macro definitions for the bmbx data structure are defined
359 * in lpfc_hw4.h with the register definition.
362 struct lpfc_dmabuf
*dmabuf
;
363 struct dma_address dma_address
;
/* Per-entry sizes (bytes) for each SLI4 queue type */
#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4
/* NOTE(review): LPFC_EQE_SIZE_4 is not defined among the visible
 * defines here (only LPFC_EQE_SIZE_4B / LPFC_EQE_SIZE_16B below) --
 * confirm it is provided by lpfc_hw4.h or this expansion will not
 * compile where used.
 */
#define LPFC_EQE_SIZE_4B 4
#define LPFC_EQE_SIZE_16B 16
#define LPFC_CQE_SIZE 16
#define LPFC_WQE_SIZE 64
#define LPFC_WQE128_SIZE 128
#define LPFC_MQE_SIZE 256
#define LPFC_RQE_SIZE 8
379 #define LPFC_EQE_DEF_COUNT 1024
380 #define LPFC_CQE_DEF_COUNT 1024
381 #define LPFC_CQE_EXP_COUNT 4096
382 #define LPFC_WQE_DEF_COUNT 256
383 #define LPFC_WQE_EXP_COUNT 1024
384 #define LPFC_MQE_DEF_COUNT 16
385 #define LPFC_RQE_DEF_COUNT 512
387 #define LPFC_QUEUE_NOARM false
388 #define LPFC_QUEUE_REARM true
392 * SLI4 CT field defines
394 #define SLI4_CT_RPI 0
395 #define SLI4_CT_VPI 1
396 #define SLI4_CT_VFI 2
397 #define SLI4_CT_FCFI 3
400 * SLI4 specific data structures
402 struct lpfc_max_cfg_param
{
424 /* SLI4 HBA multi-fcp queue handler struct */
425 #define LPFC_SLI4_HANDLER_NAME_SZ 16
426 struct lpfc_hba_eq_hdl
{
428 char handler_name
[LPFC_SLI4_HANDLER_NAME_SZ
];
429 struct lpfc_hba
*phba
;
430 atomic_t hba_eq_in_use
;
431 struct cpumask
*cpumask
;
432 /* CPU affinitsed to or 0xffffffff if multiple */
434 #define LPFC_MULTI_CPU_AFFINITY 0xffffffff
437 /*BB Credit recovery value*/
438 struct lpfc_bbscn_params
{
440 #define lpfc_bbscn_min_SHIFT 0
441 #define lpfc_bbscn_min_MASK 0x0000000F
442 #define lpfc_bbscn_min_WORD word0
443 #define lpfc_bbscn_max_SHIFT 4
444 #define lpfc_bbscn_max_MASK 0x0000000F
445 #define lpfc_bbscn_max_WORD word0
446 #define lpfc_bbscn_def_SHIFT 8
447 #define lpfc_bbscn_def_MASK 0x0000000F
448 #define lpfc_bbscn_def_WORD word0
451 /* Port Capabilities for SLI4 Parameters */
452 struct lpfc_pc_sli4_params
{
457 uint32_t featurelevel_1
;
458 uint32_t featurelevel_2
;
459 uint32_t proto_types
;
460 #define LPFC_SLI4_PROTO_FCOE 0x0000001
461 #define LPFC_SLI4_PROTO_FC 0x0000002
462 #define LPFC_SLI4_PROTO_NIC 0x0000004
463 #define LPFC_SLI4_PROTO_ISCSI 0x0000008
464 #define LPFC_SLI4_PROTO_RDMA 0x0000010
465 uint32_t sge_supp_len
;
467 uint32_t rq_db_window
;
468 uint32_t loopbk_scope
;
469 uint32_t oas_supported
;
470 uint32_t eq_pages_max
;
472 uint32_t cq_pages_max
;
474 uint32_t mq_pages_max
;
476 uint32_t mq_elem_cnt
;
477 uint32_t wq_pages_max
;
479 uint32_t rq_pages_max
;
481 uint32_t hdr_pages_max
;
483 uint32_t hdr_pp_align
;
484 uint32_t sgl_pages_max
;
485 uint32_t sgl_pp_align
;
493 #define LPFC_WQ_SZ64_SUPPORT 1
494 #define LPFC_WQ_SZ128_SUPPORT 2
498 #define LPFC_CQ_4K_PAGE_SZ 0x1
499 #define LPFC_CQ_16K_PAGE_SZ 0x4
500 #define LPFC_WQ_4K_PAGE_SZ 0x1
501 #define LPFC_WQ_16K_PAGE_SZ 0x4
508 struct lpfc_sli4_lnk_info
{
510 #define LPFC_LNK_DAT_INVAL 0
511 #define LPFC_LNK_DAT_VAL 1
513 #define LPFC_LNK_GE 0x0 /* FCoE */
514 #define LPFC_LNK_FC 0x1 /* FC */
/* Total interrupt handler count: every HBA IO channel plus the FOF channel */
#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX + LPFC_FOF_IO_CHAN_NUM)
522 /* Used for IRQ vector to CPU mapping */
523 struct lpfc_vector_map_info
{
529 #define LPFC_VECTOR_MAP_EMPTY 0xffff
531 /* SLI4 HBA data structure entries */
532 struct lpfc_sli4_hba
{
533 void __iomem
*conf_regs_memmap_p
; /* Kernel memory mapped address for
534 * config space registers
536 void __iomem
*ctrl_regs_memmap_p
; /* Kernel memory mapped address for
539 void __iomem
*drbl_regs_memmap_p
; /* Kernel memory mapped address for
542 void __iomem
*dpp_regs_memmap_p
; /* Kernel memory mapped address for
547 /* IF Type 0, BAR 0 PCI cfg space reg mem map */
548 void __iomem
*UERRLOregaddr
;
549 void __iomem
*UERRHIregaddr
;
550 void __iomem
*UEMASKLOregaddr
;
551 void __iomem
*UEMASKHIregaddr
;
554 /* IF Type 2, BAR 0 PCI cfg space reg mem map. */
555 void __iomem
*STATUSregaddr
;
556 void __iomem
*CTRLregaddr
;
557 void __iomem
*ERR1regaddr
;
558 #define SLIPORT_ERR1_REG_ERR_CODE_1 0x1
559 #define SLIPORT_ERR1_REG_ERR_CODE_2 0x2
560 void __iomem
*ERR2regaddr
;
561 #define SLIPORT_ERR2_REG_FW_RESTART 0x0
562 #define SLIPORT_ERR2_REG_FUNC_PROVISON 0x1
563 #define SLIPORT_ERR2_REG_FORCED_DUMP 0x2
564 #define SLIPORT_ERR2_REG_FAILURE_EQ 0x3
565 #define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
566 #define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
567 #define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
568 void __iomem
*EQDregaddr
;
572 /* IF type 0, BAR1 and if type 2, Bar 0 CSR register memory map */
573 void __iomem
*PSMPHRregaddr
;
575 /* Well-known SLI INTF register memory map. */
576 void __iomem
*SLIINTFregaddr
;
578 /* IF type 0, BAR 1 function CSR register memory map */
579 void __iomem
*ISRregaddr
; /* HST_ISR register */
580 void __iomem
*IMRregaddr
; /* HST_IMR register */
581 void __iomem
*ISCRregaddr
; /* HST_ISCR register */
582 /* IF type 0, BAR 0 and if type 2, BAR 0 doorbell register memory map */
583 void __iomem
*RQDBregaddr
; /* RQ_DOORBELL register */
584 void __iomem
*WQDBregaddr
; /* WQ_DOORBELL register */
585 void __iomem
*CQDBregaddr
; /* CQ_DOORBELL register */
586 void __iomem
*EQDBregaddr
; /* EQ_DOORBELL register */
587 void __iomem
*MQDBregaddr
; /* MQ_DOORBELL register */
588 void __iomem
*BMBXregaddr
; /* BootStrap MBX register */
594 struct lpfc_register sli_intf
;
595 struct lpfc_pc_sli4_params pc_sli4_params
;
596 struct lpfc_bbscn_params bbscn_params
;
597 struct lpfc_hba_eq_hdl
*hba_eq_hdl
; /* HBA per-WQ handle */
599 void (*sli4_eq_clr_intr
)(struct lpfc_queue
*q
);
600 uint32_t (*sli4_eq_release
)(struct lpfc_queue
*q
, bool arm
);
601 uint32_t (*sli4_cq_release
)(struct lpfc_queue
*q
, bool arm
);
603 /* Pointers to the constructed SLI4 queues */
604 struct lpfc_queue
**hba_eq
; /* Event queues for HBA */
605 struct lpfc_queue
**fcp_cq
; /* Fast-path FCP compl queue */
606 struct lpfc_queue
**nvme_cq
; /* Fast-path NVME compl queue */
607 struct lpfc_queue
**nvmet_cqset
; /* Fast-path NVMET CQ Set queues */
608 struct lpfc_queue
**nvmet_mrq_hdr
; /* Fast-path NVMET hdr MRQs */
609 struct lpfc_queue
**nvmet_mrq_data
; /* Fast-path NVMET data MRQs */
610 struct lpfc_queue
**fcp_wq
; /* Fast-path FCP work queue */
611 struct lpfc_queue
**nvme_wq
; /* Fast-path NVME work queue */
612 uint16_t *fcp_cq_map
;
613 uint16_t *nvme_cq_map
;
614 struct list_head lpfc_wq_list
;
616 struct lpfc_queue
*mbx_cq
; /* Slow-path mailbox complete queue */
617 struct lpfc_queue
*els_cq
; /* Slow-path ELS response complete queue */
618 struct lpfc_queue
*nvmels_cq
; /* NVME LS complete queue */
619 struct lpfc_queue
*mbx_wq
; /* Slow-path MBOX work queue */
620 struct lpfc_queue
*els_wq
; /* Slow-path ELS work queue */
621 struct lpfc_queue
*nvmels_wq
; /* NVME LS work queue */
622 struct lpfc_queue
*hdr_rq
; /* Slow-path Header Receive queue */
623 struct lpfc_queue
*dat_rq
; /* Slow-path Data Receive queue */
625 struct lpfc_name wwnn
;
626 struct lpfc_name wwpn
;
628 uint32_t fw_func_mode
; /* FW function protocol mode */
629 uint32_t ulp0_mode
; /* ULP0 protocol mode */
630 uint32_t ulp1_mode
; /* ULP1 protocol mode */
632 struct lpfc_queue
*fof_eq
; /* Flash Optimized Fabric Event queue */
634 /* Optimized Access Storage specific queues/structures */
636 struct lpfc_queue
*oas_cq
; /* OAS completion queue */
637 struct lpfc_queue
*oas_wq
; /* OAS Work queue */
638 struct lpfc_sli_ring
*oas_ring
;
639 uint64_t oas_next_lun
;
640 uint8_t oas_next_tgt_wwpn
[8];
641 uint8_t oas_next_vpt_wwpn
[8];
643 /* Setup information for various queue parameters */
654 #define LPFC_SP_EQ_MAX_INTR_SEC 10000
655 #define LPFC_FP_EQ_MAX_INTR_SEC 10000
657 uint32_t intr_enable
;
658 struct lpfc_bmbx bmbx
;
659 struct lpfc_max_cfg_param max_cfg_param
;
660 uint16_t extents_in_use
; /* must allocate resource extents. */
661 uint16_t rpi_hdrs_in_use
; /* must post rpi hdrs if set. */
662 uint16_t next_xri
; /* last_xri - max_cfg_param.xri_base = used */
664 uint16_t nvme_xri_max
;
665 uint16_t nvme_xri_cnt
;
666 uint16_t nvme_xri_start
;
667 uint16_t scsi_xri_max
;
668 uint16_t scsi_xri_cnt
;
669 uint16_t scsi_xri_start
;
670 uint16_t els_xri_cnt
;
671 uint16_t nvmet_xri_cnt
;
672 uint16_t nvmet_io_wait_cnt
;
673 uint16_t nvmet_io_wait_total
;
674 struct list_head lpfc_els_sgl_list
;
675 struct list_head lpfc_abts_els_sgl_list
;
676 struct list_head lpfc_nvmet_sgl_list
;
677 struct list_head lpfc_abts_nvmet_ctx_list
;
678 struct list_head lpfc_abts_scsi_buf_list
;
679 struct list_head lpfc_abts_nvme_buf_list
;
680 struct list_head lpfc_nvmet_io_wait_list
;
681 struct lpfc_nvmet_ctx_info
*nvmet_ctx_info
;
682 struct lpfc_sglq
**lpfc_sglq_active_list
;
683 struct list_head lpfc_rpi_hdr_list
;
684 unsigned long *rpi_bmask
;
687 struct list_head lpfc_rpi_blk_list
;
688 unsigned long *xri_bmask
;
690 struct list_head lpfc_xri_blk_list
;
691 unsigned long *vfi_bmask
;
694 struct list_head lpfc_vfi_blk_list
;
695 struct lpfc_sli4_flags sli4_flags
;
696 struct list_head sp_queue_event
;
697 struct list_head sp_cqe_event_pool
;
698 struct list_head sp_asynce_work_queue
;
699 struct list_head sp_fcp_xri_aborted_work_queue
;
700 struct list_head sp_els_xri_aborted_work_queue
;
701 struct list_head sp_unsol_work_queue
;
702 struct lpfc_sli4_link link_state
;
703 struct lpfc_sli4_lnk_info lnk_info
;
704 uint32_t pport_name_sta
;
705 #define LPFC_SLI4_PPNAME_NON 0
706 #define LPFC_SLI4_PPNAME_GET 1
708 spinlock_t abts_nvme_buf_list_lock
; /* list of aborted SCSI IOs */
709 spinlock_t abts_scsi_buf_list_lock
; /* list of aborted SCSI IOs */
710 spinlock_t sgl_list_lock
; /* list of aborted els IOs */
711 spinlock_t nvmet_io_wait_lock
; /* IOs waiting for ctx resources */
712 uint32_t physical_port
;
714 /* CPU to vector mapping information */
715 struct lpfc_vector_map_info
*cpu_map
;
716 uint16_t num_online_cpu
;
717 uint16_t num_present_cpu
;
718 uint16_t curr_disp_cpu
;
727 enum lpfc_sgl_state
{
734 /* lpfc_sglqs are used in double linked lists */
735 struct list_head list
;
736 struct list_head clist
;
737 enum lpfc_sge_type buff_type
; /* is this a scsi sgl */
738 enum lpfc_sgl_state state
;
739 struct lpfc_nodelist
*ndlp
; /* ndlp associated with IO */
740 uint16_t iotag
; /* pre-assigned IO tag */
741 uint16_t sli4_lxritag
; /* logical pre-assigned xri. */
742 uint16_t sli4_xritag
; /* pre-assigned XRI, (OXID) tag. */
743 struct sli4_sge
*sgl
; /* pre-assigned SGL */
744 void *virt
; /* virtual address. */
745 dma_addr_t phys
; /* physical address */
748 struct lpfc_rpi_hdr
{
749 struct list_head list
;
751 struct lpfc_dmabuf
*dmabuf
;
757 struct lpfc_rsrc_blks
{
758 struct list_head list
;
764 struct lpfc_rdp_context
{
765 struct lpfc_nodelist
*ndlp
;
768 READ_LNK_VAR link_stat
;
769 uint8_t page_a0
[DMP_SFF_PAGE_A0_SIZE
];
770 uint8_t page_a2
[DMP_SFF_PAGE_A2_SIZE
];
771 void (*cmpl
)(struct lpfc_hba
*, struct lpfc_rdp_context
*, int);
774 struct lpfc_lcb_context
{
780 struct lpfc_nodelist
*ndlp
;
785 * SLI4 specific function prototypes
787 int lpfc_pci_function_reset(struct lpfc_hba
*);
788 int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba
*);
789 int lpfc_sli4_hba_setup(struct lpfc_hba
*);
790 int lpfc_sli4_config(struct lpfc_hba
*, struct lpfcMboxq
*, uint8_t,
791 uint8_t, uint32_t, bool);
792 void lpfc_sli4_mbox_cmd_free(struct lpfc_hba
*, struct lpfcMboxq
*);
793 void lpfc_sli4_mbx_sge_set(struct lpfcMboxq
*, uint32_t, dma_addr_t
, uint32_t);
794 void lpfc_sli4_mbx_sge_get(struct lpfcMboxq
*, uint32_t,
795 struct lpfc_mbx_sge
*);
796 int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba
*, struct lpfcMboxq
*,
799 void lpfc_sli4_hba_reset(struct lpfc_hba
*);
800 struct lpfc_queue
*lpfc_sli4_queue_alloc(struct lpfc_hba
*, uint32_t,
802 void lpfc_sli4_queue_free(struct lpfc_queue
*);
803 int lpfc_eq_create(struct lpfc_hba
*, struct lpfc_queue
*, uint32_t);
804 int lpfc_modify_hba_eq_delay(struct lpfc_hba
*phba
, uint32_t startq
,
805 uint32_t numq
, uint32_t imax
);
806 int lpfc_cq_create(struct lpfc_hba
*, struct lpfc_queue
*,
807 struct lpfc_queue
*, uint32_t, uint32_t);
808 int lpfc_cq_create_set(struct lpfc_hba
*phba
, struct lpfc_queue
**cqp
,
809 struct lpfc_queue
**eqp
, uint32_t type
,
811 int32_t lpfc_mq_create(struct lpfc_hba
*, struct lpfc_queue
*,
812 struct lpfc_queue
*, uint32_t);
813 int lpfc_wq_create(struct lpfc_hba
*, struct lpfc_queue
*,
814 struct lpfc_queue
*, uint32_t);
815 int lpfc_rq_create(struct lpfc_hba
*, struct lpfc_queue
*,
816 struct lpfc_queue
*, struct lpfc_queue
*, uint32_t);
817 int lpfc_mrq_create(struct lpfc_hba
*phba
, struct lpfc_queue
**hrqp
,
818 struct lpfc_queue
**drqp
, struct lpfc_queue
**cqp
,
820 int lpfc_eq_destroy(struct lpfc_hba
*, struct lpfc_queue
*);
821 int lpfc_cq_destroy(struct lpfc_hba
*, struct lpfc_queue
*);
822 int lpfc_mq_destroy(struct lpfc_hba
*, struct lpfc_queue
*);
823 int lpfc_wq_destroy(struct lpfc_hba
*, struct lpfc_queue
*);
824 int lpfc_rq_destroy(struct lpfc_hba
*, struct lpfc_queue
*,
825 struct lpfc_queue
*);
826 int lpfc_sli4_queue_setup(struct lpfc_hba
*);
827 void lpfc_sli4_queue_unset(struct lpfc_hba
*);
828 int lpfc_sli4_post_sgl(struct lpfc_hba
*, dma_addr_t
, dma_addr_t
, uint16_t);
829 int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba
*);
830 int lpfc_repost_nvme_sgl_list(struct lpfc_hba
*phba
);
831 uint16_t lpfc_sli4_next_xritag(struct lpfc_hba
*);
832 void lpfc_sli4_free_xri(struct lpfc_hba
*, int);
833 int lpfc_sli4_post_async_mbox(struct lpfc_hba
*);
834 int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba
*, struct list_head
*, int);
835 struct lpfc_cq_event
*__lpfc_sli4_cq_event_alloc(struct lpfc_hba
*);
836 struct lpfc_cq_event
*lpfc_sli4_cq_event_alloc(struct lpfc_hba
*);
837 void __lpfc_sli4_cq_event_release(struct lpfc_hba
*, struct lpfc_cq_event
*);
838 void lpfc_sli4_cq_event_release(struct lpfc_hba
*, struct lpfc_cq_event
*);
839 int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba
*);
840 int lpfc_sli4_post_rpi_hdr(struct lpfc_hba
*, struct lpfc_rpi_hdr
*);
841 int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba
*);
842 struct lpfc_rpi_hdr
*lpfc_sli4_create_rpi_hdr(struct lpfc_hba
*);
843 void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba
*);
844 int lpfc_sli4_alloc_rpi(struct lpfc_hba
*);
845 void lpfc_sli4_free_rpi(struct lpfc_hba
*, int);
846 void lpfc_sli4_remove_rpis(struct lpfc_hba
*);
847 void lpfc_sli4_async_event_proc(struct lpfc_hba
*);
848 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba
*);
849 int lpfc_sli4_resume_rpi(struct lpfc_nodelist
*,
850 void (*)(struct lpfc_hba
*, LPFC_MBOXQ_t
*), void *);
851 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba
*);
852 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba
*);
853 void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba
*,
854 struct sli4_wcqe_xri_aborted
*);
855 void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba
*phba
,
856 struct sli4_wcqe_xri_aborted
*axri
);
857 void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba
*phba
,
858 struct sli4_wcqe_xri_aborted
*axri
);
859 void lpfc_sli4_els_xri_aborted(struct lpfc_hba
*,
860 struct sli4_wcqe_xri_aborted
*);
861 void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport
*);
862 void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport
*);
863 int lpfc_sli4_brdreset(struct lpfc_hba
*);
864 int lpfc_sli4_add_fcf_record(struct lpfc_hba
*, struct fcf_record
*);
865 void lpfc_sli_remove_dflt_fcf(struct lpfc_hba
*);
866 int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba
*);
867 int lpfc_sli4_get_iocb_cnt(struct lpfc_hba
*phba
);
868 int lpfc_sli4_init_vpi(struct lpfc_vport
*);
869 inline void lpfc_sli4_eq_clr_intr(struct lpfc_queue
*);
870 uint32_t lpfc_sli4_cq_release(struct lpfc_queue
*, bool);
871 uint32_t lpfc_sli4_eq_release(struct lpfc_queue
*, bool);
872 inline void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue
*q
);
873 uint32_t lpfc_sli4_if6_cq_release(struct lpfc_queue
*q
, bool arm
);
874 uint32_t lpfc_sli4_if6_eq_release(struct lpfc_queue
*q
, bool arm
);
875 void lpfc_sli4_fcfi_unreg(struct lpfc_hba
*, uint16_t);
876 int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba
*, uint16_t);
877 int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba
*, uint16_t);
878 int lpfc_sli4_read_fcf_rec(struct lpfc_hba
*, uint16_t);
879 void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba
*, LPFC_MBOXQ_t
*);
880 void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba
*, LPFC_MBOXQ_t
*);
881 void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba
*, LPFC_MBOXQ_t
*);
882 int lpfc_sli4_unregister_fcf(struct lpfc_hba
*);
883 int lpfc_sli4_post_status_check(struct lpfc_hba
*);
884 uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba
*, LPFC_MBOXQ_t
*);
885 uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba
*, LPFC_MBOXQ_t
*);