drivers/scsi/lpfc/lpfc_sli4.h
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2009-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or  *
 * modify it under the terms of version 2 of the GNU General      *
 * Public License as published by the Free Software Foundation.   *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#define LPFC_ACTIVE_MBOX_WAIT_CNT		100
#define LPFC_XRI_EXCH_BUSY_WAIT_TMO		10000
#define LPFC_XRI_EXCH_BUSY_WAIT_T1		10
#define LPFC_XRI_EXCH_BUSY_WAIT_T2		30000
#define LPFC_RELEASE_NOTIFICATION_INTERVAL	32
#define LPFC_RPI_LOW_WATER_MARK			10

#define LPFC_UNREG_FCF				1
#define LPFC_SKIP_UNREG_FCF			0

/* Amount of time in milliseconds to wait for FCF rediscovery to complete */
#define LPFC_FCF_REDISCOVER_WAIT_TMO		2000 /* msec */

/* Number of SGL entries that can be posted in a 4KB nonembedded mbox command */
#define LPFC_NEMBED_MBOX_SGL_CNT		254
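/*
 * Note on the value above (an editorial aside, not from the hardware
 * spec): 256 sixteen-byte SLI4 SGEs would exactly fill a 4KB page
 * (4096 / 16 = 256); two slots (32 bytes) are presumably consumed by
 * the nonembedded request header, leaving 254 usable SGL entries.
 */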
/* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */
#define LPFC_HBA_IO_CHAN_MIN	0
#define LPFC_HBA_IO_CHAN_MAX	32
#define LPFC_FCP_IO_CHAN_DEF	4
#define LPFC_NVME_IO_CHAN_DEF	0

/* Number of channels used for Flash Optimized Fabric (FOF) operations */
#define LPFC_FOF_IO_CHAN_NUM	1

/*
 * Provide the default FCF Record attributes used by the driver
 * when non-FIP mode is configured and there are no other default
 * FCF Record attributes.
 */
#define LPFC_FCOE_FCF_DEF_INDEX	0
#define LPFC_FCOE_FCF_GET_FIRST	0xFFFF
#define LPFC_FCOE_FCF_NEXT_NONE	0xFFFF

#define LPFC_FCOE_NULL_VID	0xFFF
#define LPFC_FCOE_IGNORE_VID	0xFFFF

/* First 3 bytes of default FCF MAC is specified by FC_MAP */
#define LPFC_FCOE_FCF_MAC3	0xFF
#define LPFC_FCOE_FCF_MAC4	0xFF
#define LPFC_FCOE_FCF_MAC5	0xFE
#define LPFC_FCOE_FCF_MAP0	0x0E
#define LPFC_FCOE_FCF_MAP1	0xFC
#define LPFC_FCOE_FCF_MAP2	0x00
#define LPFC_FCOE_MAX_RCV_SIZE	0x800
#define LPFC_FCOE_FKA_ADV_PER	0
#define LPFC_FCOE_FIP_PRIORITY	0x80
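/*
 * Taken together, the MAP0-2 and MAC3-5 defaults above compose the
 * default FCF MAC address 0E:FC:00:FF:FF:FE, i.e. the default FC_MAP
 * (0x0EFC00) prepended to the well-known fabric login address 0xFFFFFE.
 */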
#define sli4_sid_from_fc_hdr(fc_hdr)  \
	((fc_hdr)->fh_s_id[0] << 16 | \
	 (fc_hdr)->fh_s_id[1] <<  8 | \
	 (fc_hdr)->fh_s_id[2])

#define sli4_did_from_fc_hdr(fc_hdr)  \
	((fc_hdr)->fh_d_id[0] << 16 | \
	 (fc_hdr)->fh_d_id[1] <<  8 | \
	 (fc_hdr)->fh_d_id[2])

#define sli4_fctl_from_fc_hdr(fc_hdr)  \
	((fc_hdr)->fh_f_ctl[0] << 16 | \
	 (fc_hdr)->fh_f_ctl[1] <<  8 | \
	 (fc_hdr)->fh_f_ctl[2])

#define sli4_type_from_fc_hdr(fc_hdr)  \
	((fc_hdr)->fh_type)
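/*
 * Usage sketch (illustrative, not code copied from the driver): the
 * S_ID, D_ID and F_CTL fields of an FC frame header are 3-byte
 * big-endian values, so the macros above simply repack the bytes into
 * a host uint32_t.  For a header whose fh_s_id[] is { 0x01, 0x02, 0x03 }:
 *
 *	uint32_t sid  = sli4_sid_from_fc_hdr(fc_hdr);	// 0x010203
 *	uint32_t did  = sli4_did_from_fc_hdr(fc_hdr);
 *	uint32_t fctl = sli4_fctl_from_fc_hdr(fc_hdr);
 */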
#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT	12000

#define INT_FW_UPGRADE	0
#define RUN_FW_UPGRADE	1

enum lpfc_sli4_queue_type {
	LPFC_EQ,
	LPFC_GCQ,
	LPFC_MCQ,
	LPFC_WCQ,
	LPFC_RCQ,
	LPFC_MQ,
	LPFC_WQ,
	LPFC_HRQ,
	LPFC_DRQ
};

/* The queue sub-type defines the functional purpose of the queue */
enum lpfc_sli4_queue_subtype {
	LPFC_NONE,
	LPFC_MBOX,
	LPFC_FCP,
	LPFC_ELS,
	LPFC_NVME,
	LPFC_NVMET,
	LPFC_NVME_LS,
	LPFC_USOL
};
union sli4_qe {
	void *address;
	struct lpfc_eqe *eqe;
	struct lpfc_cqe *cqe;
	struct lpfc_mcqe *mcqe;
	struct lpfc_wcqe_complete *wcqe_complete;
	struct lpfc_wcqe_release *wcqe_release;
	struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
	struct lpfc_rcqe_complete *rcqe_complete;
	struct lpfc_mqe *mqe;
	union lpfc_wqe *wqe;
	union lpfc_wqe128 *wqe128;
	struct lpfc_rqe *rqe;
};
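/*
 * Each element of a queue's qe[] array is one of these unions; which
 * member is meaningful depends on the queue type.  A plausible access
 * pattern (a sketch, not code copied from the driver) for an event
 * queue would be:
 *
 *	struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
 */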
/* RQ buffer list */
struct lpfc_rqb {
	uint16_t entry_count;	  /* Current number of RQ slots */
	uint16_t buffer_count;	  /* Current number of buffers posted */
	struct list_head rqb_buffer_list;  /* buffers assigned to this HBQ */
				  /* Callback for HBQ buffer allocation */
	struct rqb_dmabuf *(*rqb_alloc_buffer)(struct lpfc_hba *);
				  /* Callback for HBQ buffer free */
	void (*rqb_free_buffer)(struct lpfc_hba *,
				struct rqb_dmabuf *);
};

struct lpfc_queue {
	struct list_head list;
	struct list_head wq_list;
	enum lpfc_sli4_queue_type type;
	enum lpfc_sli4_queue_subtype subtype;
	struct lpfc_hba *phba;
	struct list_head child_list;
	struct list_head page_list;
	struct list_head sgl_list;
	uint32_t entry_count;	/* Number of entries to support on the queue */
	uint32_t entry_size;	/* Size of each queue entry. */
	uint32_t entry_repost;	/* Count of entries before doorbell is rung */
#define LPFC_QUEUE_MIN_REPOST	8
	uint32_t queue_id;	/* Queue ID assigned by the hardware */
	uint32_t assoc_qid;	/* Queue ID associated with, for CQ/WQ/MQ */
	uint32_t page_count;	/* Number of pages allocated for this queue */
	uint32_t host_index;	/* The host's index for putting or getting */
	uint32_t hba_index;	/* The last known hba index for get or put */

	struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
	struct lpfc_rqb *rqbp;	/* ptr to RQ buffers */

	uint16_t sgl_list_cnt;
	uint16_t db_format;
#define LPFC_DB_RING_FORMAT	0x01
#define LPFC_DB_LIST_FORMAT	0x02
	void __iomem *db_regaddr;
	/* For q stats */
	uint32_t q_cnt_1;
	uint32_t q_cnt_2;
	uint32_t q_cnt_3;
	uint64_t q_cnt_4;
/* defines for EQ stats */
#define	EQ_max_eqe		q_cnt_1
#define	EQ_no_entry		q_cnt_2
#define	EQ_badstate		q_cnt_3
#define	EQ_processed		q_cnt_4

/* defines for CQ stats */
#define	CQ_mbox			q_cnt_1
#define	CQ_max_cqe		q_cnt_1
#define	CQ_release_wqe		q_cnt_2
#define	CQ_xri_aborted		q_cnt_3
#define	CQ_wq			q_cnt_4

/* defines for WQ stats */
#define	WQ_overflow		q_cnt_1
#define	WQ_posted		q_cnt_4

/* defines for RQ stats */
#define	RQ_no_posted_buf	q_cnt_1
#define	RQ_no_buf_found		q_cnt_2
#define	RQ_buf_trunc		q_cnt_3
#define	RQ_rcv_buf		q_cnt_4

	uint64_t isr_timestamp;
	struct lpfc_queue *assoc_qp;
	union sli4_qe qe[1];	/* array to index entries (must be last) */
};
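/*
 * The q_cnt_1..q_cnt_4 counters are shared storage; the EQ_/CQ_/WQ_/RQ_
 * defines above just give them a per-queue-type name.  For example
 * (an illustrative sketch), bumping the posted-WQE statistic on a work
 * queue is simply:
 *
 *	wq->WQ_posted++;	/\* expands to wq->q_cnt_4++ *\/
 */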
struct lpfc_sli4_link {
	uint16_t speed;
	uint8_t duplex;
	uint8_t status;
	uint8_t type;
	uint8_t number;
	uint8_t fault;
	uint16_t logical_speed;
	uint16_t topology;
};

struct lpfc_fcf_rec {
	uint8_t  fabric_name[8];
	uint8_t  switch_name[8];
	uint8_t  mac_addr[6];
	uint16_t fcf_indx;
	uint32_t priority;
	uint16_t vlan_id;
	uint32_t addr_mode;
	uint32_t flag;
#define BOOT_ENABLE	0x01
#define RECORD_VALID	0x02
};

struct lpfc_fcf_pri_rec {
	uint16_t fcf_index;
#define LPFC_FCF_ON_PRI_LIST	0x0001
#define LPFC_FCF_FLOGI_FAILED	0x0002
	uint16_t flag;
	uint32_t priority;
};

struct lpfc_fcf_pri {
	struct list_head list;
	struct lpfc_fcf_pri_rec fcf_rec;
};

/*
 * Maximum FCF table index; this is for driver internal bookkeeping and
 * just needs to be no less than the supported HBA's FCF table size.
 */
#define LPFC_SLI4_FCF_TBL_INDX_MAX	32

struct lpfc_fcf {
	uint16_t fcfi;
	uint32_t fcf_flag;
#define FCF_AVAILABLE	0x01 /* FCF available for discovery */
#define FCF_REGISTERED	0x02 /* FCF registered with FW */
#define FCF_SCAN_DONE	0x04 /* FCF table scan done */
#define FCF_IN_USE	0x08 /* At least one discovery completed */
#define FCF_INIT_DISC	0x10 /* Initial FCF discovery */
#define FCF_DEAD_DISC	0x20 /* FCF DEAD fast FCF failover discovery */
#define FCF_ACVL_DISC	0x40 /* All CVL fast FCF failover discovery */
#define FCF_DISCOVERY	(FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
#define FCF_REDISC_PEND	0x80 /* FCF rediscovery pending */
#define FCF_REDISC_EVT	0x100 /* FCF rediscovery event to worker thread */
#define FCF_REDISC_FOV	0x200 /* Post FCF rediscovery fast failover */
#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
	uint32_t addr_mode;
	uint32_t eligible_fcf_cnt;
	struct lpfc_fcf_rec current_rec;
	struct lpfc_fcf_rec failover_rec;
	struct list_head fcf_pri_list;
	struct lpfc_fcf_pri fcf_pri[LPFC_SLI4_FCF_TBL_INDX_MAX];
	uint32_t current_fcf_scan_pri;
	struct timer_list redisc_wait;
	unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
};
#define LPFC_REGION23_SIGNATURE "RG23"
#define LPFC_REGION23_VERSION	1
#define LPFC_REGION23_LAST_REC	0xff
#define DRIVER_SPECIFIC_TYPE	0xA2
#define LINUX_DRIVER_ID		0x20
#define PORT_STE_TYPE		0x1

struct lpfc_fip_param_hdr {
	uint8_t type;
#define FCOE_PARAM_TYPE		0xA0
	uint8_t length;
#define FCOE_PARAM_LENGTH	2
	uint8_t parm_version;
#define FIPP_VERSION		0x01
	uint8_t parm_flags;
#define	lpfc_fip_param_hdr_fipp_mode_SHIFT	6
#define	lpfc_fip_param_hdr_fipp_mode_MASK	0x3
#define lpfc_fip_param_hdr_fipp_mode_WORD	parm_flags
#define	FIPP_MODE_ON				0x1
#define	FIPP_MODE_OFF				0x0
#define FIPP_VLAN_VALID				0x1
};

struct lpfc_fcoe_params {
	uint8_t fc_map[3];
	uint8_t reserved1;
	uint16_t vlan_tag;
	uint8_t reserved[2];
};

struct lpfc_fcf_conn_hdr {
	uint8_t type;
#define FCOE_CONN_TBL_TYPE	0xA1
	uint8_t length;   /* words */
	uint8_t reserved[2];
};

struct lpfc_fcf_conn_rec {
	uint16_t flags;
#define	FCFCNCT_VALID		0x0001
#define	FCFCNCT_BOOT		0x0002
#define	FCFCNCT_PRIMARY		0x0004   /* if not set, Secondary */
#define	FCFCNCT_FBNM_VALID	0x0008
#define	FCFCNCT_SWNM_VALID	0x0010
#define	FCFCNCT_VLAN_VALID	0x0020
#define	FCFCNCT_AM_VALID	0x0040
#define	FCFCNCT_AM_PREFERRED	0x0080   /* if not set, AM Required */
#define	FCFCNCT_AM_SPMA		0x0100	 /* if not set, FPMA */

	uint16_t vlan_tag;
	uint8_t fabric_name[8];
	uint8_t switch_name[8];
};

struct lpfc_fcf_conn_entry {
	struct list_head list;
	struct lpfc_fcf_conn_rec conn_rec;
};

/*
 * Define the host's bootstrap mailbox.  This structure contains
 * the member attributes needed to create, use, and destroy the
 * bootstrap mailbox region.
 *
 * The macro definitions for the bmbx data structure are defined
 * in lpfc_hw4.h with the register definition.
 */
struct lpfc_bmbx {
	struct lpfc_dmabuf *dmabuf;
	struct dma_address dma_address;
	void *avirt;
	dma_addr_t aphys;
	uint32_t bmbx_size;
};
#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4

#define LPFC_EQE_SIZE_4B	4
#define LPFC_EQE_SIZE_16B	16
#define LPFC_CQE_SIZE		16
#define LPFC_WQE_SIZE		64
#define LPFC_WQE128_SIZE	128
#define LPFC_MQE_SIZE		256
#define LPFC_RQE_SIZE		8

#define LPFC_EQE_DEF_COUNT	1024
#define LPFC_CQE_DEF_COUNT	1024
#define LPFC_WQE_DEF_COUNT	256
#define LPFC_WQE128_DEF_COUNT	128
#define LPFC_WQE128_MAX_COUNT	256
#define LPFC_MQE_DEF_COUNT	16
#define LPFC_RQE_DEF_COUNT	512

#define LPFC_QUEUE_NOARM	false
#define LPFC_QUEUE_REARM	true
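/*
 * Queue memory is carved from fixed-size pages, so a queue's page_count
 * follows from its entry_size and entry_count.  A back-of-the-envelope
 * example (assuming a 4KB SLI4 page size): an EQ with LPFC_EQE_DEF_COUNT
 * (1024) entries of LPFC_EQE_SIZE_4B (4) bytes fits in a single page,
 * while a CQ with 1024 16-byte entries needs four pages.
 */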
/*
 * SLI4 CT field defines
 */
#define SLI4_CT_RPI	0
#define SLI4_CT_VPI	1
#define SLI4_CT_VFI	2
#define SLI4_CT_FCFI	3
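/*
 * The CT values select how the context tag in a WQE is interpreted
 * (RPI, VPI, VFI or FCFI).  A hedged sketch of how a WQE builder might
 * pair them -- the bf_set()/wqe_ct/wqe_ctxt_tag names are assumed to
 * come from lpfc_hw4.h and are not defined in this file:
 *
 *	bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, SLI4_CT_VPI);
 *	bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, vpi);
 */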
/*
 * SLI4 specific data structures
 */
struct lpfc_max_cfg_param {
	uint16_t max_xri;
	uint16_t xri_base;
	uint16_t xri_used;
	uint16_t max_rpi;
	uint16_t rpi_base;
	uint16_t rpi_used;
	uint16_t max_vpi;
	uint16_t vpi_base;
	uint16_t vpi_used;
	uint16_t max_vfi;
	uint16_t vfi_base;
	uint16_t vfi_used;
	uint16_t max_fcfi;
	uint16_t fcfi_used;
	uint16_t max_eq;
	uint16_t max_rq;
	uint16_t max_cq;
	uint16_t max_wq;
};

struct lpfc_hba;
/* SLI4 HBA multi-fcp queue handler struct */
struct lpfc_hba_eq_hdl {
	uint32_t idx;
	struct lpfc_hba *phba;
	atomic_t hba_eq_in_use;
	struct cpumask *cpumask;
	/* CPU affinitized to, or 0xffffffff if multiple */
	uint32_t cpu;
#define LPFC_MULTI_CPU_AFFINITY 0xffffffff
};
/* Port Capabilities for SLI4 Parameters */
struct lpfc_pc_sli4_params {
	uint32_t supported;
	uint32_t if_type;
	uint32_t sli_rev;
	uint32_t sli_family;
	uint32_t featurelevel_1;
	uint32_t featurelevel_2;
	uint32_t proto_types;
#define LPFC_SLI4_PROTO_FCOE	0x0000001
#define LPFC_SLI4_PROTO_FC	0x0000002
#define LPFC_SLI4_PROTO_NIC	0x0000004
#define LPFC_SLI4_PROTO_ISCSI	0x0000008
#define LPFC_SLI4_PROTO_RDMA	0x0000010
	uint32_t sge_supp_len;
	uint32_t if_page_sz;
	uint32_t rq_db_window;
	uint32_t loopbk_scope;
	uint32_t oas_supported;
	uint32_t eq_pages_max;
	uint32_t eqe_size;
	uint32_t cq_pages_max;
	uint32_t cqe_size;
	uint32_t mq_pages_max;
	uint32_t mqe_size;
	uint32_t mq_elem_cnt;
	uint32_t wq_pages_max;
	uint32_t wqe_size;
	uint32_t rq_pages_max;
	uint32_t rqe_size;
	uint32_t hdr_pages_max;
	uint32_t hdr_size;
	uint32_t hdr_pp_align;
	uint32_t sgl_pages_max;
	uint32_t sgl_pp_align;
	uint8_t cqv;
	uint8_t mqv;
	uint8_t wqv;
	uint8_t rqv;
	uint8_t wqsize;
#define LPFC_WQ_SZ64_SUPPORT	1
#define LPFC_WQ_SZ128_SUPPORT	2
	uint8_t wqpcnt;
};

struct lpfc_iov {
	uint32_t pf_number;
	uint32_t vf_number;
};

struct lpfc_sli4_lnk_info {
	uint8_t lnk_dv;
#define LPFC_LNK_DAT_INVAL	0
#define LPFC_LNK_DAT_VAL	1
	uint8_t lnk_tp;
#define LPFC_LNK_GE	0x0 /* FCoE */
#define LPFC_LNK_FC	0x1 /* FC */
	uint8_t lnk_no;
	uint8_t optic_state;
};

#define LPFC_SLI4_HANDLER_CNT		(LPFC_HBA_IO_CHAN_MAX + \
					 LPFC_FOF_IO_CHAN_NUM)
#define LPFC_SLI4_HANDLER_NAME_SZ	16

/* Used for IRQ vector to CPU mapping */
struct lpfc_vector_map_info {
	uint16_t	phys_id;
	uint16_t	core_id;
	uint16_t	irq;
	uint16_t	channel_id;
};
#define LPFC_VECTOR_MAP_EMPTY	0xffff
/* SLI4 HBA data structure entries */
struct lpfc_sli4_hba {
	void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
					     PCI BAR0, config space registers */
	void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
					     PCI BAR1, control registers */
	void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
					     PCI BAR2, doorbell registers */
	union {
		struct {
			/* IF Type 0, BAR 0 PCI cfg space reg mem map */
			void __iomem *UERRLOregaddr;
			void __iomem *UERRHIregaddr;
			void __iomem *UEMASKLOregaddr;
			void __iomem *UEMASKHIregaddr;
		} if_type0;
		struct {
			/* IF Type 2, BAR 0 PCI cfg space reg mem map. */
			void __iomem *STATUSregaddr;
			void __iomem *CTRLregaddr;
			void __iomem *ERR1regaddr;
#define SLIPORT_ERR1_REG_ERR_CODE_1		0x1
#define SLIPORT_ERR1_REG_ERR_CODE_2		0x2
			void __iomem *ERR2regaddr;
#define SLIPORT_ERR2_REG_FW_RESTART		0x0
#define SLIPORT_ERR2_REG_FUNC_PROVISON		0x1
#define SLIPORT_ERR2_REG_FORCED_DUMP		0x2
#define SLIPORT_ERR2_REG_FAILURE_EQ		0x3
#define SLIPORT_ERR2_REG_FAILURE_CQ		0x4
#define SLIPORT_ERR2_REG_FAILURE_BUS		0x5
#define SLIPORT_ERR2_REG_FAILURE_RQ		0x6
		} if_type2;
	} u;

	/* IF type 0, BAR 1 and IF type 2, BAR 0 CSR register memory map */
	void __iomem *PSMPHRregaddr;

	/* Well-known SLI INTF register memory map. */
	void __iomem *SLIINTFregaddr;

	/* IF type 0, BAR 1 function CSR register memory map */
	void __iomem *ISRregaddr;	/* HST_ISR register */
	void __iomem *IMRregaddr;	/* HST_IMR register */
	void __iomem *ISCRregaddr;	/* HST_ISCR register */
	/* IF type 0, BAR 0 and IF type 2, BAR 0 doorbell register memory map */
	void __iomem *RQDBregaddr;	/* RQ_DOORBELL register */
	void __iomem *WQDBregaddr;	/* WQ_DOORBELL register */
	void __iomem *EQCQDBregaddr;	/* EQCQ_DOORBELL register */
	void __iomem *MQDBregaddr;	/* MQ_DOORBELL register */
	void __iomem *BMBXregaddr;	/* BootStrap MBX register */

	uint32_t ue_mask_lo;
	uint32_t ue_mask_hi;
	uint32_t ue_to_sr;
	uint32_t ue_to_rp;
	struct lpfc_register sli_intf;
	struct lpfc_pc_sli4_params pc_sli4_params;
	uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
	struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */

	/* Pointers to the constructed SLI4 queues */
	struct lpfc_queue **hba_eq;  /* Event queues for HBA */
	struct lpfc_queue **fcp_cq;  /* Fast-path FCP compl queue */
	struct lpfc_queue **nvme_cq; /* Fast-path NVME compl queue */
	struct lpfc_queue **nvmet_cqset; /* Fast-path NVMET CQ Set queues */
	struct lpfc_queue **nvmet_mrq_hdr; /* Fast-path NVMET hdr MRQs */
	struct lpfc_queue **nvmet_mrq_data; /* Fast-path NVMET data MRQs */
	struct lpfc_queue **fcp_wq;  /* Fast-path FCP work queue */
	struct lpfc_queue **nvme_wq; /* Fast-path NVME work queue */
	uint16_t *fcp_cq_map;
	uint16_t *nvme_cq_map;
	struct list_head lpfc_wq_list;

	struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
	struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
	struct lpfc_queue *nvmels_cq; /* NVME LS complete queue */
	struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
	struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
	struct lpfc_queue *nvmels_wq; /* NVME LS work queue */
	struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
	struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */

	struct lpfc_name wwnn;
	struct lpfc_name wwpn;

	uint32_t fw_func_mode;	/* FW function protocol mode */
	uint32_t ulp0_mode;	/* ULP0 protocol mode */
	uint32_t ulp1_mode;	/* ULP1 protocol mode */

	struct lpfc_queue *fof_eq; /* Flash Optimized Fabric Event queue */

	/* Optimized Access Storage specific queues/structures */

	struct lpfc_queue *oas_cq; /* OAS completion queue */
	struct lpfc_queue *oas_wq; /* OAS Work queue */
	struct lpfc_sli_ring *oas_ring;
	uint64_t oas_next_lun;
	uint8_t oas_next_tgt_wwpn[8];
	uint8_t oas_next_vpt_wwpn[8];

	/* Setup information for various queue parameters */
	int eq_esize;
	int eq_ecount;
	int cq_esize;
	int cq_ecount;
	int wq_esize;
	int wq_ecount;
	int mq_esize;
	int mq_ecount;
	int rq_esize;
	int rq_ecount;
#define LPFC_SP_EQ_MAX_INTR_SEC		10000
#define LPFC_FP_EQ_MAX_INTR_SEC		10000

	uint32_t intr_enable;
	struct lpfc_bmbx bmbx;
	struct lpfc_max_cfg_param max_cfg_param;
	uint16_t extents_in_use; /* must allocate resource extents. */
	uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
	uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
	uint16_t next_rpi;
	uint16_t nvme_xri_max;
	uint16_t nvme_xri_cnt;
	uint16_t nvme_xri_start;
	uint16_t scsi_xri_max;
	uint16_t scsi_xri_cnt;
	uint16_t scsi_xri_start;
	uint16_t els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	struct list_head lpfc_els_sgl_list;
	struct list_head lpfc_abts_els_sgl_list;
	struct list_head lpfc_nvmet_sgl_list;
	struct list_head lpfc_abts_nvmet_sgl_list;
	struct list_head lpfc_abts_scsi_buf_list;
	struct list_head lpfc_abts_nvme_buf_list;
	struct lpfc_sglq **lpfc_sglq_active_list;
	struct list_head lpfc_rpi_hdr_list;
	unsigned long *rpi_bmask;
	uint16_t *rpi_ids;
	uint16_t rpi_count;
	struct list_head lpfc_rpi_blk_list;
	unsigned long *xri_bmask;
	uint16_t *xri_ids;
	struct list_head lpfc_xri_blk_list;
	unsigned long *vfi_bmask;
	uint16_t *vfi_ids;
	uint16_t vfi_count;
	struct list_head lpfc_vfi_blk_list;
	struct lpfc_sli4_flags sli4_flags;
	struct list_head sp_queue_event;
	struct list_head sp_cqe_event_pool;
	struct list_head sp_asynce_work_queue;
	struct list_head sp_fcp_xri_aborted_work_queue;
	struct list_head sp_els_xri_aborted_work_queue;
	struct list_head sp_nvme_xri_aborted_work_queue;
	struct list_head sp_unsol_work_queue;
	struct lpfc_sli4_link link_state;
	struct lpfc_sli4_lnk_info lnk_info;
	uint32_t pport_name_sta;
#define LPFC_SLI4_PPNAME_NON	0
#define LPFC_SLI4_PPNAME_GET	1
	struct lpfc_iov iov;
	spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */
	spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
	spinlock_t sgl_list_lock; /* list of aborted els IOs */
	spinlock_t nvmet_io_lock;
	uint32_t physical_port;

	/* CPU to vector mapping information */
	struct lpfc_vector_map_info *cpu_map;
	uint16_t num_online_cpu;
	uint16_t num_present_cpu;
	uint16_t curr_disp_cpu;

	uint16_t nvmet_mrq_post_idx;
};
enum lpfc_sge_type {
	GEN_BUFF_TYPE,
	SCSI_BUFF_TYPE,
	NVMET_BUFF_TYPE
};

enum lpfc_sgl_state {
	SGL_FREED,
	SGL_ALLOCATED,
	SGL_XRI_ABORTED
};

struct lpfc_sglq {
	/* lpfc_sglqs are used in double linked lists */
	struct list_head list;
	struct list_head clist;
	enum lpfc_sge_type buff_type; /* is this a scsi sgl */
	enum lpfc_sgl_state state;
	struct lpfc_nodelist *ndlp; /* ndlp associated with IO */
	uint16_t iotag;         /* pre-assigned IO tag */
	uint16_t sli4_lxritag;  /* logical pre-assigned xri. */
	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
	struct sli4_sge *sgl;	/* pre-assigned SGL */
	void *virt;		/* virtual address. */
	dma_addr_t phys;	/* physical address */
};

struct lpfc_rpi_hdr {
	struct list_head list;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf;
	uint32_t page_count;
	uint32_t start_rpi;
};

struct lpfc_rsrc_blks {
	struct list_head list;
	uint16_t rsrc_start;
	uint16_t rsrc_size;
	uint16_t rsrc_used;
};

struct lpfc_rdp_context {
	struct lpfc_nodelist *ndlp;
	uint16_t ox_id;
	uint16_t rx_id;
	READ_LNK_VAR link_stat;
	uint8_t page_a0[DMP_SFF_PAGE_A0_SIZE];
	uint8_t page_a2[DMP_SFF_PAGE_A2_SIZE];
	void (*cmpl)(struct lpfc_hba *, struct lpfc_rdp_context *, int);
};

struct lpfc_lcb_context {
	uint8_t  sub_command;
	uint8_t  type;
	uint8_t  frequency;
	uint16_t ox_id;
	uint16_t rx_id;
	struct lpfc_nodelist *ndlp;
};
/*
 * SLI4 specific function prototypes
 */
int lpfc_pci_function_reset(struct lpfc_hba *);
int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *);
int lpfc_sli4_hba_setup(struct lpfc_hba *);
int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
		     uint8_t, uint32_t, bool);
void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
			   struct lpfc_mbx_sge *);
int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
			       uint16_t);

void lpfc_sli4_hba_reset(struct lpfc_hba *);
struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
					 uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
		   struct lpfc_queue *, uint32_t, uint32_t);
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
		       struct lpfc_queue **eqp, uint32_t type,
		       uint32_t subtype);
int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
		       struct lpfc_queue *, uint32_t);
int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
		   struct lpfc_queue *, uint32_t);
int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
		   struct lpfc_queue *, struct lpfc_queue *, uint32_t);
int lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
		    struct lpfc_queue **drqp, struct lpfc_queue **cqp,
		    uint32_t subtype);
void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
		    struct lpfc_queue *);
int lpfc_sli4_queue_setup(struct lpfc_hba *);
void lpfc_sli4_queue_unset(struct lpfc_hba *);
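/*
 * The create/destroy prototypes above follow the SLI4 queue hierarchy:
 * an EQ must exist before the CQs that post events to it, and a CQ must
 * exist before the WQ/MQ/RQ that complete onto it.  A rough bring-up
 * sketch (illustrative only; lpfc_sli4_queue_setup() drives the real
 * sequence):
 *
 *	lpfc_eq_create(phba, eq, imax);
 *	lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP);
 *	lpfc_wq_create(phba, wq, cq, LPFC_FCP);
 */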
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
void lpfc_sli4_free_xri(struct lpfc_hba *, int);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
void lpfc_sli4_remove_rpis(struct lpfc_hba *);
void lpfc_sli4_async_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
			 void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
			       struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
				struct sli4_wcqe_xri_aborted *axri);
void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
				 struct sli4_wcqe_xri_aborted *axri);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
			       struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
void lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *);
int lpfc_sli4_brdreset(struct lpfc_hba *);
int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
int lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba);
int lpfc_sli4_init_vpi(struct lpfc_vport *);
uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);