/*
 * Update supported PCI ids of efct driver.
 * [efct-Emulex_FC_Target.git] / efct / efct_hw.h
 * blob cd3136f801e699cd11da60c4b0b961343a061ee9
 */
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
20 #ifndef _EFCT_HW_H
21 #define _EFCT_HW_H
23 #include "../libefc_sli/sli4.h"
24 #include "efct_utils.h"
/*
 * EFCT PCI IDs
 *
 * NOTE(review): LPE31004 (16Gb) and LPE32002 (32Gb) intentionally share
 * device id 0xe307 — both are lancer-g6 family parts.
 */
#define EFCT_VENDOR_ID			0x10df	/* Emulex */
#define EFCT_DEVICE_ID_LPE31004		0xe307	/* LightPulse 16Gb x 4
						 * FC (lancer-g6)
						 */
#define PCI_PRODUCT_EMULEX_LPE32002	0xe307	/* LightPulse 32Gb x 2
						 * FC (lancer-g6)
						 */
#define EFCT_DEVICE_ID_G7		0xf407	/* LightPulse 32Gb x 4
						 * FC (lancer-g7)
						 */
/* Define rq_threads seq cbuf size to 4K (equal to SLI RQ max length) */
#define EFCT_RQTHREADS_MAX_SEQ_CBUF	4096

/* Default RQ entries len used by driver */
#define EFCT_HW_RQ_ENTRIES_MIN		512
#define EFCT_HW_RQ_ENTRIES_DEF		1024
#define EFCT_HW_RQ_ENTRIES_MAX		4096

/* Defines the size of the RQ buffers used for each RQ */
#define EFCT_HW_RQ_SIZE_HDR		128
#define EFCT_HW_RQ_SIZE_PAYLOAD		1024

/* Define the maximum number of multi-receive queues */
#define EFCT_HW_MAX_MRQS		8

/*
 * Define count of when to set the WQEC bit in a submitted
 * WQE, causing a consumed/released completion to be posted.
 */
#define EFCT_HW_WQEC_SET_COUNT		32

/* Send frame timeout in seconds */
#define EFCT_HW_SEND_FRAME_TIMEOUT	10

/*
 * FDT Transfer Hint value, reads greater than this value
 * will be segmented to implement fairness. A value of zero disables
 * the feature.
 */
#define EFCT_HW_FDT_XFER_HINT		8192
/*
 * HW asserts/verify
 *
 * efct_hw_assert() reports a failed condition with file/line context;
 * efct_hw_verify() reports and returns the verdict from _efct_hw_verify().
 */
extern void
_efct_hw_assert(const char *cond, const char *filename, int linenum);
extern int
_efct_hw_verify(const char *cond, const char *filename, int linenum);

#define efct_hw_assert(cond) \
	do { \
		if ((!(cond))) \
			_efct_hw_assert(#cond, __FILE__, __LINE__); \
	} while (0)

#define efct_hw_verify(cond, ...) _efct_hw_verify(#cond, __FILE__, __LINE__)
#define efct_hw_verify_arg(cond) efct_hw_verify(cond, EFCT_HW_RTN_INVALID_ARG)
/*
 * HW completion loop control parameters.
 *
 * The HW completion loop must terminate periodically
 * to keep the OS happy. The loop terminates when a predefined
 * time has elapsed, but to keep the overhead of
 * computing time down, the time is only checked after a
 * number of loop iterations has completed.
 *
 * EFCT_HW_TIMECHECK_ITERATIONS - number of loop iterations
 * between time checks
 */
#define EFCT_HW_TIMECHECK_ITERATIONS	100
#define EFCT_HW_MAX_NUM_MQ		1
#define EFCT_HW_MAX_NUM_RQ		32
#define EFCT_HW_MAX_NUM_EQ		16
#define EFCT_HW_MAX_NUM_WQ		32

#define OCE_HW_MAX_NUM_MRQ_PAIRS	16

#define EFCT_HW_MAX_WQ_CLASS		4
#define EFCT_HW_MAX_WQ_CPU		128

/*
 * A CQ will be assigned to each WQ
 * (CQ must have 2X entries of the WQ for abort
 * processing), plus a separate one for each RQ PAIR and one for MQ
 */
#define EFCT_HW_MAX_NUM_CQ \
	((EFCT_HW_MAX_NUM_WQ * 2) + 1 + (OCE_HW_MAX_NUM_MRQ_PAIRS * 2))

/*
 * Macros used to size the Q hash table: B2..B32 smear the high bit
 * right so B32_NEXT_POWER_OF_2(x) rounds x up to a power of two.
 */
#define B2(x)			((x) | ((x) >> 1))
#define B4(x)			(B2(x) | (B2(x) >> 2))
#define B8(x)			(B4(x) | (B4(x) >> 4))
#define B16(x)			(B8(x) | (B8(x) >> 8))
#define B32(x)			(B16(x) | (B16(x) >> 16))
#define B32_NEXT_POWER_OF_2(x)	(B32((x) - 1) + 1)

/*
 * Q hash - size is the maximum of all the queue sizes, rounded up to
 * the next power of 2
 */
#define EFCT_HW_Q_HASH_SIZE \
	B32_NEXT_POWER_OF_2(EFCT_HW_MAX_NUM_CQ)

#define EFCT_HW_RQ_HEADER_SIZE	128
#define EFCT_HW_RQ_HEADER_INDEX	0
/**
 * @brief Options for efct_hw_command().
 */
enum {
	/**< command executes synchronously and busy-waits for completion */
	EFCT_CMD_POLL,
	/**< command executes asynchronously. Uses callback */
	EFCT_CMD_NOWAIT,
};
/* HW return codes; negative values are errors (see EFCT_HW_RTN_IS_ERROR) */
enum efct_hw_rtn_e {
	EFCT_HW_RTN_SUCCESS = 0,
	EFCT_HW_RTN_SUCCESS_SYNC = 1,
	EFCT_HW_RTN_ERROR = -1,
	EFCT_HW_RTN_NO_RESOURCES = -2,
	EFCT_HW_RTN_NO_MEMORY = -3,
	EFCT_HW_RTN_IO_NOT_ACTIVE = -4,
	EFCT_HW_RTN_IO_ABORT_IN_PROGRESS = -5,
	EFCT_HW_RTN_IO_PORT_OWNED_ALREADY_ABORTED = -6,
	EFCT_HW_RTN_INVALID_ARG = -7,
};

#define EFCT_HW_RTN_IS_ERROR(e)	((e) < 0)
/* Scope of a requested HW reset */
enum efct_hw_reset_e {
	EFCT_HW_RESET_FUNCTION,
	EFCT_HW_RESET_FIRMWARE,
	EFCT_HW_RESET_MAX
};
/* Keys for efct_hw_get()/efct_hw_set() style property access */
enum efct_hw_property_e {
	EFCT_HW_N_IO,
	EFCT_HW_N_SGL,
	EFCT_HW_MAX_IO,
	EFCT_HW_MAX_SGE,
	EFCT_HW_MAX_SGL,
	EFCT_HW_MAX_NODES,
	EFCT_HW_MAX_RQ_ENTRIES,
	EFCT_HW_TOPOLOGY,	/**< auto, nport, loop */
	EFCT_HW_WWN_NODE,
	EFCT_HW_WWN_PORT,
	EFCT_HW_FW_REV,
	EFCT_HW_FW_REV2,
	EFCT_HW_IPL,
	EFCT_HW_VPD,
	EFCT_HW_VPD_LEN,
	EFCT_HW_MODE,		/**< initiator, target, both */
	EFCT_HW_LINK_SPEED,
	EFCT_HW_IF_TYPE,
	EFCT_HW_SLI_REV,
	EFCT_HW_SLI_FAMILY,
	EFCT_HW_RQ_PROCESS_LIMIT,
	EFCT_HW_RQ_DEFAULT_BUFFER_SIZE,
	EFCT_HW_AUTO_XFER_RDY_CAPABLE,
	EFCT_HW_AUTO_XFER_RDY_XRI_CNT,
	EFCT_HW_AUTO_XFER_RDY_SIZE,
	EFCT_HW_AUTO_XFER_RDY_BLK_SIZE,
	EFCT_HW_AUTO_XFER_RDY_T10_ENABLE,
	EFCT_HW_AUTO_XFER_RDY_P_TYPE,
	EFCT_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA,
	EFCT_HW_AUTO_XFER_RDY_APP_TAG_VALID,
	EFCT_HW_AUTO_XFER_RDY_APP_TAG_VALUE,
	EFCT_HW_DIF_CAPABLE,
	EFCT_HW_DIF_SEED,
	EFCT_HW_DIF_MODE,
	EFCT_HW_DIF_MULTI_SEPARATE,
	EFCT_HW_DUMP_MAX_SIZE,
	EFCT_HW_DUMP_READY,
	EFCT_HW_DUMP_PRESENT,
	EFCT_HW_RESET_REQUIRED,
	EFCT_HW_FW_ERROR,
	EFCT_HW_FW_READY,
	EFCT_HW_HIGH_LOGIN_MODE,
	EFCT_HW_PREREGISTER_SGL,
	EFCT_HW_HW_REV1,
	EFCT_HW_HW_REV2,
	EFCT_HW_HW_REV3,
	EFCT_HW_LINKCFG,
	EFCT_HW_ETH_LICENSE,
	EFCT_HW_LINK_MODULE_TYPE,
	EFCT_HW_NUM_CHUTES,
	EFCT_HW_WAR_VERSION,
	EFCT_HW_DISABLE_AR_TGT_DIF,
	/**< emulate IAAB=0 for initiator-commands only */
	EFCT_HW_EMULATE_I_ONLY_AAB,
	/**< enable driver timeouts for target WQEs */
	EFCT_HW_EMULATE_TARGET_WQE_TIMEOUT,
	EFCT_HW_LINK_CONFIG_SPEED,
	EFCT_HW_CONFIG_TOPOLOGY,
	EFCT_HW_BOUNCE,
	EFCT_HW_PORTNUM,
	EFCT_HW_BIOS_VERSION_STRING,
	EFCT_HW_RQ_SELECT_POLICY,
	EFCT_HW_SGL_CHAINING_CAPABLE,
	EFCT_HW_SGL_CHAINING_ALLOWED,
	EFCT_HW_SGL_CHAINING_HOST_ALLOCATED,
	EFCT_HW_SEND_FRAME_CAPABLE,
	EFCT_HW_RQ_SELECTION_POLICY,
	EFCT_HW_RR_QUANTA,
	EFCT_HW_FILTER_DEF,
	EFCT_HW_MAX_VPORTS,
	EFCT_ESOC,
};
/* Values for the EFCT_HW_TOPOLOGY property */
enum {
	EFCT_HW_TOPOLOGY_AUTO,
	EFCT_HW_TOPOLOGY_NPORT,
	EFCT_HW_TOPOLOGY_LOOP,
	EFCT_HW_TOPOLOGY_NONE,
	EFCT_HW_TOPOLOGY_MAX
};
/* Values for the EFCT_HW_MODE property */
enum {
	EFCT_HW_MODE_INITIATOR,
	EFCT_HW_MODE_TARGET,
	EFCT_HW_MODE_BOTH,
	EFCT_HW_MODE_MAX
};
/**
 * @brief Port protocols
 */
enum efct_hw_port_protocol_e {
	EFCT_HW_PORT_PROTOCOL_FCOE,
	EFCT_HW_PORT_PROTOCOL_FC,
	EFCT_HW_PORT_PROTOCOL_OTHER,
};
269 #define EFCT_HW_MAX_PROFILES 40
271 * @brief A Profile Descriptor
273 struct efct_hw_profile_descriptor_s {
274 u32 profile_index;
275 u32 profile_id;
276 char profile_description[512];
280 * @brief A Profile List
282 struct efct_hw_profile_list_s {
283 u32 num_descriptors;
284 struct efct_hw_profile_descriptor_s
285 descriptors[EFCT_HW_MAX_PROFILES];
/*
 * HW workarounds
 *
 * This API contains the declarations that allow
 * run-time selection of workarounds
 * based on the asic type and revision, and range of fw revision.
 */

/**
 * @brief pack fw revision values into a single uint64_t
 */
/* Two levels of macro needed due to expansion */
#define HW_FWREV(a, b, c, d) (((uint64_t)(a) << 48) | ((uint64_t)(b) << 32)\
			      | ((uint64_t)(c) << 16) | ((uint64_t)(d)))

#define EFCT_FW_VER_STR(a, b, c, d) (#a "." #b "." #c "." #d)
306 struct efct_hw_workaround_s {
307 u64 fwrev;
309 /* Control Declarations here ...*/
311 u8 retain_tsend_io_length;
313 /* Use unregistered RPI */
314 bool use_unregistered_rpi;
315 u32 unregistered_rid;
316 u32 unregistered_index;
318 u8 disable_ar_tgt_dif; /* Disable auto rsp if target DIF */
319 bool disable_dump_loc;
320 bool use_dif_quarantine;
321 bool use_dif_sec_xri;
323 bool override_fcfi;
325 bool fw_version_too_low;
327 bool sglc_misreported;
329 u8 ignore_send_frame;
/**
 * @brief Defines DIF operation modes
 */
enum {
	EFCT_HW_DIF_MODE_INLINE,
	EFCT_HW_DIF_MODE_SEPARATE,
};
/**
 * @brief T10 DIF operations.
 */
enum efct_hw_dif_oper_e {
	EFCT_HW_DIF_OPER_DISABLED,
	EFCT_HW_SGE_DIFOP_INNODIFOUTCRC,
	EFCT_HW_SGE_DIFOP_INCRCOUTNODIF,
	EFCT_HW_SGE_DIFOP_INNODIFOUTCHKSUM,
	EFCT_HW_SGE_DIFOP_INCHKSUMOUTNODIF,
	EFCT_HW_SGE_DIFOP_INCRCOUTCRC,
	EFCT_HW_SGE_DIFOP_INCHKSUMOUTCHKSUM,
	EFCT_HW_SGE_DIFOP_INCRCOUTCHKSUM,
	EFCT_HW_SGE_DIFOP_INCHKSUMOUTCRC,
	EFCT_HW_SGE_DIFOP_INRAWOUTRAW,
};

/* Common aliases for the DIF operations above */
#define EFCT_HW_DIF_OPER_PASS_THRU	EFCT_HW_SGE_DIFOP_INCRCOUTCRC
#define EFCT_HW_DIF_OPER_STRIP		EFCT_HW_SGE_DIFOP_INCRCOUTNODIF
#define EFCT_HW_DIF_OPER_INSERT		EFCT_HW_SGE_DIFOP_INNODIFOUTCRC
/**
 * @brief T10 DIF block sizes.
 *
 * NOTE(review): EFCT_HW_DIF_BK_SIZE_NA is deliberately 0 and therefore
 * aliases EFCT_HW_DIF_BK_SIZE_512.
 */
enum efct_hw_dif_blk_size_e {
	EFCT_HW_DIF_BK_SIZE_512,
	EFCT_HW_DIF_BK_SIZE_1024,
	EFCT_HW_DIF_BK_SIZE_2048,
	EFCT_HW_DIF_BK_SIZE_4096,
	EFCT_HW_DIF_BK_SIZE_520,
	EFCT_HW_DIF_BK_SIZE_4104,
	EFCT_HW_DIF_BK_SIZE_NA = 0
};
/**
 * @brief Link configurations.
 */
enum efct_hw_linkcfg_e {
	EFCT_HW_LINKCFG_4X10G = 0,
	EFCT_HW_LINKCFG_1X40G,
	EFCT_HW_LINKCFG_2X16G,
	EFCT_HW_LINKCFG_4X8G,
	EFCT_HW_LINKCFG_4X1G,
	EFCT_HW_LINKCFG_2X10G,
	EFCT_HW_LINKCFG_2X10G_2X8G,

	/* must be last */
	EFCT_HW_LINKCFG_NA,
};
/**
 * @brief link module types
 *
 * (note: these just happen to match SLI4 values)
 */
enum {
	EFCT_HW_LINK_MODULE_TYPE_1GB	= 0x0004,
	EFCT_HW_LINK_MODULE_TYPE_2GB	= 0x0008,
	EFCT_HW_LINK_MODULE_TYPE_4GB	= 0x0040,
	EFCT_HW_LINK_MODULE_TYPE_8GB	= 0x0080,
	EFCT_HW_LINK_MODULE_TYPE_10GB	= 0x0100,
	EFCT_HW_LINK_MODULE_TYPE_16GB	= 0x0200,
	EFCT_HW_LINK_MODULE_TYPE_32GB	= 0x0400,
};
407 * @brief T10 DIF information passed to the transport.
410 struct efct_hw_dif_info_s {
411 enum efct_hw_dif_oper_e dif_oper;
412 enum efct_hw_dif_blk_size_e blk_size;
413 u32 ref_tag_cmp;
414 u32 ref_tag_repl;
415 u16 app_tag_cmp;
416 u16 app_tag_repl;
417 bool check_ref_tag;
418 bool check_app_tag;
419 bool check_guard;
420 bool auto_incr_ref_tag;
421 bool repl_app_tag;
422 bool repl_ref_tag;
423 bool dif_separate;
425 /* If the APP TAG is 0xFFFF, disable REF TAG and CRC field chk */
426 bool disable_app_ffff;
428 /* if the APP TAG is 0xFFFF and REF TAG is 0xFFFF_FFFF,
429 * disable checking the received CRC field.
431 bool disable_app_ref_ffff;
432 u16 dif_seed;
433 u8 dif;
/* Request types submitted through the HW IO path */
enum efct_hw_io_type_e {
	EFCT_HW_ELS_REQ,	/**< ELS request */
	EFCT_HW_ELS_RSP,	/**< ELS response */
	EFCT_HW_ELS_RSP_SID,	/**< ELS response, override the S_ID */
	EFCT_HW_FC_CT,		/**< FC Common Transport */
	EFCT_HW_FC_CT_RSP,	/**< FC Common Transport Response */
	EFCT_HW_BLS_ACC,	/**< BLS accept (BA_ACC) */
	EFCT_HW_BLS_ACC_SID,	/**< BLS accept (BA_ACC), override the S_ID */
	EFCT_HW_BLS_RJT,	/**< BLS reject (BA_RJT) */
	EFCT_HW_IO_TARGET_READ,
	EFCT_HW_IO_TARGET_WRITE,
	EFCT_HW_IO_TARGET_RSP,
	EFCT_HW_IO_INITIATOR_READ,
	EFCT_HW_IO_INITIATOR_WRITE,
	EFCT_HW_IO_INITIATOR_NODATA,
	EFCT_HW_IO_DNRX_REQUEUE,
	EFCT_HW_IO_MAX,
};
/* Lifecycle state of a HW IO object */
enum efct_hw_io_state_e {
	EFCT_HW_IO_STATE_FREE,
	EFCT_HW_IO_STATE_INUSE,
	EFCT_HW_IO_STATE_WAIT_FREE,
	EFCT_HW_IO_STATE_WAIT_SEC_HIO,
};
/* Descriptive strings for the HW IO request types (note: these must always
 * match up with the enum efct_hw_io_type_e declaration)
 *
 * Fix: the original list skipped "FC CT response" (EFCT_HW_FC_CT_RSP) and
 * the DNRX requeue entry, so every string from "BLS accept" onward was
 * off by one relative to the enum.
 */
#define EFCT_HW_IO_TYPE_STRINGS \
	"ELS request", \
	"ELS response", \
	"ELS response(set SID)", \
	"FC CT request", \
	"FC CT response", \
	"BLS accept", \
	"BLS accept(set SID)", \
	"BLS reject", \
	"target read", \
	"target write", \
	"target response", \
	"initiator read", \
	"initiator write", \
	"initiator nodata", \
	"DNRX requeue",
480 struct efct_hw_s;
482 * @brief HW command context.
484 * Stores the state for the asynchronous commands sent to the hardware.
486 struct efct_command_ctx_s {
487 struct list_head list_entry;
488 /**< Callback function */
489 int (*cb)(struct efct_hw_s *hw, int status, u8 *mqe, void *arg);
490 void *arg; /**< Argument for callback */
491 u8 *buf; /**< buffer holding command / results */
492 void *ctx; /**< upper layer context */
/* A single scatter/gather element: host address and byte length */
struct efct_hw_sgl_s {
	uintptr_t addr;
	size_t len;
};
500 union efct_hw_io_param_u {
501 struct {
502 __be16 ox_id;
503 __be16 rx_id;
504 u8 payload[12]; /**< big enough for ABTS BA_ACC */
505 } bls;
506 struct {
507 u32 s_id;
508 u16 ox_id;
509 u16 rx_id;
510 u8 payload[12]; /**< big enough for ABTS BA_ACC */
511 } bls_sid;
512 struct {
513 u8 r_ctl;
514 u8 type;
515 u8 df_ctl;
516 u8 timeout;
517 } bcast;
518 struct {
519 u16 ox_id;
520 u8 timeout;
521 } els;
522 struct {
523 u32 s_id;
524 u16 ox_id;
525 u8 timeout;
526 } els_sid;
527 struct {
528 u8 r_ctl;
529 u8 type;
530 u8 df_ctl;
531 u8 timeout;
532 } fc_ct;
533 struct {
534 u8 r_ctl;
535 u8 type;
536 u8 df_ctl;
537 u8 timeout;
538 u16 ox_id;
539 } fc_ct_rsp;
540 struct {
541 u32 offset;
542 u16 ox_id;
543 u16 flags;
544 u8 cs_ctl;
545 enum efct_hw_dif_oper_e dif_oper;
546 enum efct_hw_dif_blk_size_e blk_size;
547 u8 timeout;
548 u32 app_id;
549 } fcp_tgt;
550 struct {
551 struct efc_dma_s *cmnd;
552 struct efc_dma_s *rsp;
553 enum efct_hw_dif_oper_e dif_oper;
554 enum efct_hw_dif_blk_size_e blk_size;
555 u32 cmnd_size;
556 u16 flags;
557 u8 timeout;
558 u32 first_burst;
559 } fcp_ini;
/**
 * @brief WQ steering mode
 */
enum efct_hw_wq_steering_e {
	EFCT_HW_WQ_STEERING_CLASS,
	EFCT_HW_WQ_STEERING_REQUEST,
	EFCT_HW_WQ_STEERING_CPU,
};
572 * @brief HW wqe object
574 struct efct_hw_wqe_s {
575 struct list_head list_entry;
576 /**< set if abort wqe needs to be submitted */
577 bool abort_wqe_submit_needed;
578 /**< set to 1 to have hardware to automatically send ABTS */
579 bool send_abts;
580 /**< TRUE if DNRX was set on this IO */
581 bool auto_xfer_rdy_dnrx;
582 u32 id;
583 u32 abort_reqtag;
584 /**< work queue entry buffer */
585 u8 *wqebuf;
589 * @brief HW IO object.
591 * Stores the per-IO information necessary
592 * for both the lower (SLI) and upper
593 * layers (efct).
595 struct efct_hw_io_s {
596 /* Owned by HW */
598 /* reference counter and callback function */
599 struct kref ref;
600 void (*release)(struct kref *arg);
601 /**< used for busy, wait_free, free lists */
602 struct list_head list_entry;
603 /**< used for timed_wqe list */
604 struct list_head wqe_link;
605 /**< used for io posted dnrx list */
606 struct list_head dnrx_link;
607 /**< state of IO: free, busy, wait_free */
608 enum efct_hw_io_state_e state;
609 /**< Work queue object, with link for pending */
610 struct efct_hw_wqe_s wqe;
611 /**< Lock to synchronize TRSP and AXT Data/Cmd Cqes */
612 spinlock_t axr_lock;
613 /**< pointer back to hardware context */
614 struct efct_hw_s *hw;
615 struct efc_remote_node_s *rnode;
616 struct efct_hw_auto_xfer_rdy_buffer_s *axr_buf;
617 struct efc_dma_s xfer_rdy;
618 u16 type;
619 /**< IO abort count */
620 u32 port_owned_abort_count;
621 /**< WQ assigned to the exchange */
622 struct hw_wq_s *wq;
623 /**< Exchange is active in FW */
624 bool xbusy;
625 /**< Function called on IO completion */
627 (*done)(struct efct_hw_io_s *hio,
628 struct efc_remote_node_s *rnode,
629 u32 len, int status,
630 u32 ext, void *ul_arg);
631 /**< argument passed to "IO done" callback */
632 void *arg;
633 /**< Function called on abort completion */
635 (*abort_done)(struct efct_hw_io_s *hio,
636 struct efc_remote_node_s *rnode,
637 u32 len, int status,
638 u32 ext, void *ul_arg);
639 /**< argument passed to "abort done" callback */
640 void *abort_arg;
641 /**< needed for bug O127585: length of IO */
642 size_t length;
643 /**< timeout value for target WQEs */
644 u8 tgt_wqe_timeout;
645 /**< timestamp when current WQE was submitted */
646 u64 submit_ticks;
648 /**< if TRUE, latched status shld be returned */
649 bool status_saved;
650 /**< if TRUE, abort is in progress */
651 bool abort_in_progress;
652 bool quarantine; /**< set if IO to be quarantined */
653 /**< set if first phase of IO */
654 bool quarantine_first_phase;
655 /**< set if POST_XRI used to send XRI to chip */
656 bool is_port_owned;
657 /**< TRUE if DNRX was set on this IO */
658 bool auto_xfer_rdy_dnrx;
659 u32 saved_status; /**< latched status */
660 u32 saved_len; /**< latched length */
661 u32 saved_ext; /**< latched extended status */
663 /**< EQ that this HIO came up on */
664 struct hw_eq_s *eq;
665 /**< WQ steering mode request */
666 enum efct_hw_wq_steering_e wq_steering;
667 /**< WQ class if steering mode is Class */
668 u8 wq_class;
670 /* Owned by SLI layer */
671 u16 reqtag; /**< request tag for this HW IO */
672 /**< request tag for an abort of this HW IO
673 * (note: this is a 32 bit value
674 * to allow us to use UINT32_MAX as an uninitialized value)
676 u32 abort_reqtag;
677 u32 indicator; /**< XRI */
678 struct efc_dma_s def_sgl;/**< default scatter gather list */
679 u32 def_sgl_count; /**< count of SGEs in default SGL */
680 struct efc_dma_s *sgl; /**< pointer to current active SGL */
681 u32 sgl_count; /**< count of SGEs in io->sgl */
682 u32 first_data_sge; /**< index of first data SGE */
683 struct efc_dma_s *ovfl_sgl; /**< overflow SGL */
684 u32 ovfl_sgl_count; /**< count of SGEs in default SGL */
685 /**< pointer to overflow segment len */
686 struct sli4_lsp_sge_s *ovfl_lsp;
687 u32 n_sge; /**< number of active SGEs */
688 u32 sge_offset;
690 /**< Secondary HW IO context */
691 struct efct_hw_io_s *sec_hio;
692 /**< Secondary HW IO context saved iparam */
693 union efct_hw_io_param_u sec_iparam;
694 /**< Secondary HW IO context saved len */
695 u32 sec_len;
697 /* Owned by upper layer */
698 /**< where upper layer can store ref to its IO */
699 void *ul_io;
703 * @brief HW callback type
705 * Typedef for HW "done" callback.
707 typedef int (*efct_hw_done_t)(struct efct_hw_io_s *, struct efc_remote_node_s *,
708 u32 len, int status, u32 ext, void *ul_arg);
/* Port-level control operations */
enum efct_hw_port_e {
	EFCT_HW_PORT_INIT,
	EFCT_HW_PORT_SHUTDOWN,
	EFCT_HW_PORT_SET_LINK_CONFIG,
};
717 * @brief Node group rpi reference
719 struct efct_hw_rpi_ref_s {
720 atomic_t rpi_count;
721 atomic_t rpi_attached;
/**
 * @brief HW link stat types
 */
enum efct_hw_link_stat_e {
	EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT,
	EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT,
	EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT,
	EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT,
	EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT,
	EFCT_HW_LINK_STAT_CRC_COUNT,
	EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT,
	EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT,
	EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT,
	EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT,
	EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT,
	EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT,
	EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT,
	EFCT_HW_LINK_STAT_RCV_EOFA_COUNT,
	EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT,
	EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT,
	EFCT_HW_LINK_STAT_RCV_SOFF_COUNT,
	EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT,
	EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT,
	EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT,
	EFCT_HW_LINK_STAT_MAX,	/**< must be last */
};
/* HW host stat types.
 * NOTE(review): EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT is misspelled ("HOSY")
 * in the original; the identifier is kept as-is since renaming it would
 * break existing callers.
 */
enum efct_hw_host_stat_e {
	EFCT_HW_HOST_STAT_TX_KBYTE_COUNT,
	EFCT_HW_HOST_STAT_RX_KBYTE_COUNT,
	EFCT_HW_HOST_STAT_TX_FRAME_COUNT,
	EFCT_HW_HOST_STAT_RX_FRAME_COUNT,
	EFCT_HW_HOST_STAT_TX_SEQ_COUNT,
	EFCT_HW_HOST_STAT_RX_SEQ_COUNT,
	EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG,
	EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP,
	EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT,
	EFCT_HW_HOST_STAT_RX_F_BSY_COUNT,
	EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT,
	EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT,
	EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT,
	EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT,
	EFCT_HW_HOST_STAT_MAX	/* MUST BE LAST */
};
/* Overall HW bring-up / teardown state machine */
enum efct_hw_state_e {
	EFCT_HW_STATE_UNINITIALIZED,	/* power-on, no allefct, no init */
	EFCT_HW_STATE_QUEUES_ALLOCATED,	/* chip is reset, alloc are cmpl
					 * (queues not registered)
					 */
	EFCT_HW_STATE_ACTIVE,		/* chip is up and running */
	EFCT_HW_STATE_RESET_IN_PROGRESS, /* chip is being reset */
	EFCT_HW_STATE_TEARDOWN_IN_PROGRESS, /* teardown has been started */
};
780 * @brief Structure to track optimized write buffers posted
781 * to chip owned XRIs.
783 * Note:
784 * The rqindex will be set the following "fake" indexes.
785 * This will be used when the buffer is returned via
786 * efct_seq_free() to make the buffer available
787 * for re-use on another XRI.
789 * The dma->alloc pointer on the dummy header will be used to
790 * get back to this structure when the buffer is freed.
792 * More of these object may be allocated on the fly if more XRIs
793 * are pushed to the chip.
795 #define EFCT_HW_RQ_INDEX_DUMMY_HDR 0xFF00
796 #define EFCT_HW_RQ_INDEX_DUMMY_DATA 0xFF01
797 struct efct_hw_auto_xfer_rdy_buffer_s {
798 struct fc_frame_header hdr; /* < used to build a dummy
799 * data hdr for unsol processing **/
800 struct efc_hw_rq_buffer_s header; /**< Points to dummy data hdr */
801 struct efc_hw_rq_buffer_s payload; /**< rcvd frame payload buff */
802 struct efc_hw_sequence_s seq; /**< seq for passing the buffs */
803 u8 data_cqe;
804 u8 cmd_cqe;
806 /* fields saved from cmd hdr that are needed when data arrives */
807 u8 fcfi;
809 /* To handle outof order completions save AXR cmd and data cqes */
810 u8 call_axr_cmd;
811 u8 call_axr_data;
812 struct efc_hw_sequence_s *cmd_seq;
816 * @brief Node group rpi reference
818 struct efct_hw_link_stat_counts_s {
819 u8 overflow;
820 u32 counter;
824 * @brief HW object describing fc host stats
826 struct efct_hw_host_stat_counts_s {
827 u32 counter;
/* Size of the TID hash: 2^TID_HASH_BITS entries */
#define TID_HASH_BITS	8
#define TID_HASH_LEN	BIT(TID_HASH_BITS)
/* Where CQ entries are processed: inline or on a dedicated thread */
enum hw_cq_handler_e {
	HW_CQ_HANDLER_LOCAL,
	HW_CQ_HANDLER_THREAD,
};
838 #include "efct_hw_queues.h"
841 * @brief Structure used for the hash lookup of queue IDs
843 struct efct_queue_hash_s {
844 bool in_use;
845 u16 id;
846 u16 index;
850 * @brief Define the fields required to implement DIF quarantine.
852 #define EFCT_HW_QUARANTINE_QUEUE_DEPTH 4
854 struct efct_quarantine_info_s {
855 u32 quarantine_index;
856 struct efct_hw_io_s *quarantine_ios[EFCT_HW_QUARANTINE_QUEUE_DEPTH];
860 * @brief Define the WQ callback object
862 struct hw_wq_callback_s {
863 u16 instance_index; /**< use for request tag */
864 void (*callback)(void *arg, u8 *cqe, int status);
865 void *arg;
868 struct efct_hw_config {
869 u32 n_eq; /**< number of event queues */
870 u32 n_cq; /**< number of completion queues */
871 u32 n_mq; /**< number of mailbox queues */
872 u32 n_rq; /**< number of receive queues */
873 u32 n_wq; /**< number of work queues */
874 u32 n_io; /**< total number of IO objects */
875 u32 n_sgl;/**< length of SGL */
876 u32 speed; /** requested link speed in Mbps */
877 u32 topology; /** requested link topology */
878 /** size of the buffers for first burst */
879 u32 rq_default_buffer_size;
880 /** Initial XRIs to post to chip at init */
881 u32 auto_xfer_rdy_xri_cnt;
882 /** max size IO to use with this feature */
883 u32 auto_xfer_rdy_size;
884 /** block size to use with this feature */
885 u8 auto_xfer_rdy_blk_size_chip;
886 u8 esoc;
887 /** The seed for the DIF CRC calculation */
888 u16 dif_seed;
889 u16 auto_xfer_rdy_app_tag_value;
890 u8 dif_mode; /**< DIF mode to use */
891 /** Enable initiator-only auto-abort */
892 bool i_only_aab;
893 /** Enable driver target wqe timeouts */
894 u8 emulate_tgt_wqe_timeout;
895 bool bounce;
896 /**< Queue topology string */
897 const char *queue_topology;
898 /** Enable t10 PI for auto xfer ready */
899 u8 auto_xfer_rdy_t10_enable;
900 /** p_type for auto xfer ready */
901 u8 auto_xfer_rdy_p_type;
902 u8 auto_xfer_rdy_ref_tag_is_lba;
903 u8 auto_xfer_rdy_app_tag_valid;
904 /** MRQ RQ selection policy */
905 u8 rq_selection_policy;
906 /** RQ quanta if rq_selection_policy == 2 */
907 u8 rr_quanta;
908 u32 filter_def[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
912 * @brief HW object
914 struct efct_hw_s {
915 void *os;
916 struct sli4_s sli;
917 u16 ulp_start;
918 u16 ulp_max;
919 u32 dump_size;
920 enum efct_hw_state_e state;
921 bool hw_setup_called;
922 u8 sliport_healthcheck;
923 u16 watchdog_timeout;
925 /** HW configuration, subject to efct_hw_set() */
926 struct efct_hw_config config;
928 /* calculated queue sizes for each type */
929 u32 num_qentries[SLI_QTYPE_MAX];
931 /* Storage for SLI queue objects */
932 struct sli4_queue_s wq[EFCT_HW_MAX_NUM_WQ];
933 struct sli4_queue_s rq[EFCT_HW_MAX_NUM_RQ];
934 u16 hw_rq_lookup[EFCT_HW_MAX_NUM_RQ];
935 struct sli4_queue_s mq[EFCT_HW_MAX_NUM_MQ];
936 struct sli4_queue_s cq[EFCT_HW_MAX_NUM_CQ];
937 struct sli4_queue_s eq[EFCT_HW_MAX_NUM_EQ];
939 /* HW queue */
940 u32 eq_count;
941 u32 cq_count;
942 u32 mq_count;
943 u32 wq_count;
944 u32 rq_count; /**< count of SLI RQs */
945 struct list_head eq_list;
947 struct efct_queue_hash_s cq_hash[EFCT_HW_Q_HASH_SIZE];
948 struct efct_queue_hash_s rq_hash[EFCT_HW_Q_HASH_SIZE];
949 struct efct_queue_hash_s wq_hash[EFCT_HW_Q_HASH_SIZE];
951 /* Storage for HW queue objects */
952 struct hw_wq_s *hw_wq[EFCT_HW_MAX_NUM_WQ];
953 struct hw_rq_s *hw_rq[EFCT_HW_MAX_NUM_RQ];
954 struct hw_mq_s *hw_mq[EFCT_HW_MAX_NUM_MQ];
955 struct hw_cq_s *hw_cq[EFCT_HW_MAX_NUM_CQ];
956 struct hw_eq_s *hw_eq[EFCT_HW_MAX_NUM_EQ];
957 /**< count of hw_rq[] entries */
958 u32 hw_rq_count;
959 /**< count of multirq RQs */
960 u32 hw_mrq_count;
962 /**< pool per class WQs */
963 struct efct_varray_s *wq_class_array[EFCT_HW_MAX_WQ_CLASS];
964 /**< pool per CPU WQs */
965 struct efct_varray_s *wq_cpu_array[EFCT_HW_MAX_WQ_CPU];
967 /* Sequence objects used in incoming frame processing */
968 struct efct_array_s *seq_pool;
970 /* Auto XFER RDY Buffers - protect with io_lock */
971 /**< TRUE if auto xfer rdy enabled */
972 bool auto_xfer_rdy_enabled;
974 /**< pool of efct_hw_auto_xfer_rdy_buffer_t objects **/
975 struct efct_pool_s *auto_xfer_rdy_buf_pool;
977 /** Maintain an ordered, linked list of outstanding HW commands. */
978 spinlock_t cmd_lock;
979 struct list_head cmd_head;
980 struct list_head cmd_pending;
981 u32 cmd_head_count;
983 struct sli4_link_event_s link;
984 /**< link configuration setting */
985 enum efct_hw_linkcfg_e linkcfg;
986 /**< Ethernet license; to enable FCoE on Lancer */
987 u32 eth_license;
989 /* EFCT domain objects index by FCFI */
990 int first_domain_idx; /* Wrkarnd for srb->fcfi == 0 */
991 struct efc_domain_s *domains[SLI4_MAX_FCFI];
993 /* Table of FCFI values index by FCF_index */
994 u16 fcf_index_fcfi[SLI4_MAX_FCF_INDEX];
996 u16 fcf_indicator;
998 /**< pointer array of IO objects */
999 struct efct_hw_io_s **io;
1000 /**< array of WQE buffs mapped to IO objects */
1001 u8 *wqe_buffs;
1003 /**< IO lock to synchronize list access */
1004 spinlock_t io_lock;
1005 /**< IO lock to synchronize IO aborting */
1006 spinlock_t io_abort_lock;
1007 /**< List of IO objects in use */
1008 struct list_head io_inuse;
1009 /**< List of IO objects with a timed target WQE */
1010 struct list_head io_timed_wqe;
1011 /**< List of IO objects waiting to be freed */
1012 struct list_head io_wait_free;
1013 /**< List of IO objects available for allocation */
1014 struct list_head io_free;
1015 /**< List of IO objects posted for chip use */
1016 struct list_head io_port_owned;
1017 /**< List of IO objects needing auto xfer rdy buffers */
1018 struct list_head io_port_dnrx;
1020 struct efc_dma_s loop_map;
1022 struct efc_dma_s xfer_rdy;
1024 struct efc_dma_s dump_sges;
1026 struct efc_dma_s rnode_mem;
1028 struct efct_hw_rpi_ref_s *rpi_ref;
1030 char *hw_war_version;
1031 struct efct_hw_workaround_s workaround;
1033 atomic_t io_alloc_failed_count;
1035 /* Secondary HW IO context wait list */
1036 struct list_head sec_hio_wait_list;
1037 /* Workaround: Count of IOs that were put on the
1038 * Secondary HW IO wait list pointer to queue topology
1040 u32 sec_hio_wait_count;
1042 #define HW_MAX_TCMD_THREADS 16
1043 struct efct_hw_qtop_s *qtop; /* pointer to queue topology */
1045 /**< stat: wq sumbit count */
1046 u32 tcmd_wq_submit[EFCT_HW_MAX_NUM_WQ];
1047 /**< stat: wq complete count */
1048 u32 tcmd_wq_complete[EFCT_HW_MAX_NUM_WQ];
1050 struct timer_list wqe_timer; /**< Timer to periodically
1051 *check for WQE timeouts
1053 struct timer_list watchdog_timer; /**< Timer for heartbeat */
1054 bool in_active_wqe_timer; /* < TRUE if currently in
1055 * active wqe timer handler
1057 bool active_wqe_timer_shutdown; /* TRUE if wqe
1058 * timer is to be shutdown
1061 struct list_head iopc_list; /*< list of IO
1062 *processing contexts
1064 spinlock_t iopc_list_lock; /**< lock for iopc_list */
1066 struct efct_pool_s *wq_reqtag_pool; /* < pool of
1067 * struct hw_wq_callback_s obj
1070 atomic_t send_frame_seq_id; /* < send frame
1071 * sequence ID
/* Selector for efct_hw IO list counters */
enum efct_hw_io_count_type_e {
	EFCT_HW_IO_INUSE_COUNT,
	EFCT_HW_IO_FREE_COUNT,
	EFCT_HW_IO_WAIT_FREE_COUNT,
	EFCT_HW_IO_PORT_OWNED_COUNT,
	EFCT_HW_IO_N_TOTAL_IO_COUNT,
};
1083 extern void
1084 (*tcmd_cq_handler)(struct efct_hw_s *hw, u32 cq_idx,
1085 void *cq_handler_arg);
1087 * HW queue data structures
1090 struct hw_eq_s {
1091 struct list_head list_entry;
1092 enum sli4_qtype_e type; /**< must be second */
1093 u32 instance;
1094 u32 entry_count;
1095 u32 entry_size;
1096 struct efct_hw_s *hw;
1097 struct sli4_queue_s *queue;
1098 struct list_head cq_list;
1099 u32 use_count;
1100 struct efct_varray_s *wq_array; /*<< array of WQs */
1103 struct hw_cq_s {
1104 struct list_head list_entry;
1105 enum sli4_qtype_e type; /**< must be second */
1106 u32 instance; /*<< CQ instance (cq_idx) */
1107 u32 entry_count; /*<< Number of entries */
1108 u32 entry_size; /*<< entry size */
1109 struct hw_eq_s *eq; /*<< parent EQ */
1110 struct sli4_queue_s *queue; /**< pointer to SLI4 queue */
1111 struct list_head q_list; /**< list of children queues */
1112 u32 use_count;
1115 void hw_thread_cq_handler(struct efct_hw_s *hw, struct hw_cq_s *cq);
1117 struct hw_q_s {
1118 struct list_head list_entry;
1119 enum sli4_qtype_e type; /*<< must be second */
1122 struct hw_mq_s {
1123 struct list_head list_entry;
1124 enum sli4_qtype_e type; /*<< must be second */
1125 u32 instance;
1127 u32 entry_count;
1128 u32 entry_size;
1129 struct hw_cq_s *cq;
1130 struct sli4_queue_s *queue;
1132 u32 use_count;
1135 struct hw_wq_s {
1136 struct list_head list_entry;
1137 enum sli4_qtype_e type; /*<< must be second */
1138 u32 instance;
1139 struct efct_hw_s *hw;
1141 u32 entry_count;
1142 u32 entry_size;
1143 struct hw_cq_s *cq;
1144 struct sli4_queue_s *queue;
1145 u32 class;
1146 u8 ulp;
1148 /* WQ consumed */
1149 u32 wqec_set_count; /* how often IOs are
1150 * submitted with wqce set
1152 u32 wqec_count; /* current wqce counter */
1153 u32 free_count; /* free count */
1154 u32 total_submit_count; /* total submit count */
1155 struct list_head pending_list; /* list of IOs pending for this WQ */
1158 * Driver must quarantine XRIs
1159 * for target writes and initiator read when using
1160 * DIF separates. Throw them on a queue until another
1161 * 4 similar requests are completed
1162 * to ensure they are flushed from the internal chip cache
1163 * before re-use. The must be a separate queue per CQ because
1164 * the actual chip completion order cannot be determined.
1165 * Since each WQ has a separate CQ, use the wq
1166 * associated with the IO.
1168 * Note: Protected by queue->lock
1170 struct efct_quarantine_info_s quarantine_info;
1173 * HW IO allocated for use with Send Frame
1175 struct efct_hw_io_s *send_frame_io;
1177 /* Stats */
1178 u32 use_count; /*<< use count */
1179 u32 wq_pending_count; /* count of HW IOs that
1180 * were queued on the WQ pending list
1184 struct hw_rq_s {
1185 struct list_head list_entry;
1186 enum sli4_qtype_e type; /*<< must be second */
1187 u32 instance;
1189 u32 entry_count;
1190 u32 use_count;
1191 u32 hdr_entry_size;
1192 u32 first_burst_entry_size;
1193 u32 data_entry_size;
1194 u8 ulp;
1195 bool is_mrq;
1196 u32 base_mrq_id;
1198 struct hw_cq_s *cq;
1200 u8 filter_mask; /* Filter mask value */
1201 struct sli4_queue_s *hdr;
1202 struct sli4_queue_s *first_burst;
1203 struct sli4_queue_s *data;
1205 struct efc_hw_rq_buffer_s *hdr_buf;
1206 struct efc_hw_rq_buffer_s *fb_buf;
1207 struct efc_hw_rq_buffer_s *payload_buf;
1209 struct efc_hw_sequence_s **rq_tracker; /* RQ tracker for this RQ */
/* Driver-global HW settings shared across adapter instances */
struct efct_hw_global_s {
	const char *queue_topology_string;	/* queue topology string */
};

extern struct efct_hw_global_s hw_global;
1218 struct hw_eq_s *efct_hw_new_eq(struct efct_hw_s *hw, u32 entry_count);
1219 struct hw_cq_s *efct_hw_new_cq(struct hw_eq_s *eq, u32 entry_count);
1220 extern u32
1221 efct_hw_new_cq_set(struct hw_eq_s *eqs[], struct hw_cq_s *cqs[],
1222 u32 num_cqs, u32 entry_count);
1223 struct hw_mq_s *efct_hw_new_mq(struct hw_cq_s *cq, u32 entry_count);
1224 extern struct hw_wq_s
1225 *efct_hw_new_wq(struct hw_cq_s *cq, u32 entry_count,
1226 u32 class, u32 ulp);
1227 extern struct hw_rq_s
1228 *efct_hw_new_rq(struct hw_cq_s *cq, u32 entry_count, u32 ulp);
1229 extern u32
1230 efct_hw_new_rq_set(struct hw_cq_s *cqs[], struct hw_rq_s *rqs[],
1231 u32 num_rq_pairs,
1232 u32 entry_count, u32 ulp);
1233 void efct_hw_del_eq(struct hw_eq_s *eq);
1234 void efct_hw_del_cq(struct hw_cq_s *cq);
1235 void efct_hw_del_mq(struct hw_mq_s *mq);
1236 void efct_hw_del_wq(struct hw_wq_s *wq);
1237 void efct_hw_del_rq(struct hw_rq_s *rq);
1238 void efct_hw_queue_dump(struct efct_hw_s *hw);
1239 void efct_hw_queue_teardown(struct efct_hw_s *hw);
1240 int hw_route_rqe(struct efct_hw_s *hw, struct efc_hw_sequence_s *seq);
1241 extern int
1242 efct_hw_queue_hash_find(struct efct_queue_hash_s *hash, u16 id);
1243 extern enum efct_hw_rtn_e
1244 efct_hw_setup(struct efct_hw_s *hw, void *os, struct pci_dev *pdev);
1245 enum efct_hw_rtn_e efct_hw_init(struct efct_hw_s *hw);
1246 enum efct_hw_rtn_e efct_hw_teardown(struct efct_hw_s *hw);
1247 enum efct_hw_rtn_e
1248 efct_hw_reset(struct efct_hw_s *hw, enum efct_hw_reset_e reset);
1249 int efct_hw_get_num_eq(struct efct_hw_s *hw);
1250 extern enum efct_hw_rtn_e
1251 efct_hw_get(struct efct_hw_s *hw, enum efct_hw_property_e prop, u32 *value);
1252 extern void *
1253 efct_hw_get_ptr(struct efct_hw_s *hw, enum efct_hw_property_e prop);
1254 extern enum efct_hw_rtn_e
1255 efct_hw_set(struct efct_hw_s *hw, enum efct_hw_property_e prop, u32 value);
1256 extern enum efct_hw_rtn_e
1257 efct_hw_set_ptr(struct efct_hw_s *hw, enum efct_hw_property_e prop,
1258 void *value);
1259 extern int
1260 efct_hw_event_check(struct efct_hw_s *hw, u32 vector);
1261 extern int
1262 efct_hw_process(struct efct_hw_s *hw, u32 vector, u32 max_isr_time_msec);
1263 extern enum efct_hw_rtn_e
1264 efct_hw_command(struct efct_hw_s *hw, u8 *cmd, u32 opts, void *cb,
1265 void *arg);
1266 extern enum efct_hw_rtn_e
1267 efct_hw_port_alloc(struct efc_lport *efc, struct efc_sli_port_s *sport,
1268 struct efc_domain_s *domain, u8 *wwpn);
1269 extern enum efct_hw_rtn_e
1270 efct_hw_port_attach(struct efc_lport *efc, struct efc_sli_port_s *sport,
1271 u32 fc_id);
1272 extern enum efct_hw_rtn_e
1273 efct_hw_port_control(struct efct_hw_s *hw, enum efct_hw_port_e ctrl,
1274 uintptr_t value,
1275 void (*cb)(int status, uintptr_t value, void *arg),
1276 void *arg);
1277 extern enum efct_hw_rtn_e
1278 efct_hw_port_free(struct efc_lport *efc, struct efc_sli_port_s *sport);
1279 extern enum efct_hw_rtn_e
1280 efct_hw_domain_alloc(struct efc_lport *efc, struct efc_domain_s *domain,
1281 u32 fcf, u32 vlan);
1282 extern enum efct_hw_rtn_e
1283 efct_hw_domain_attach(struct efc_lport *efc,
1284 struct efc_domain_s *domain, u32 fc_id);
1285 extern enum efct_hw_rtn_e
1286 efct_hw_domain_free(struct efc_lport *efc, struct efc_domain_s *domain);
1287 extern enum efct_hw_rtn_e
1288 efct_hw_domain_force_free(struct efc_lport *efc, struct efc_domain_s *domain);
1289 struct efc_domain_s *efct_hw_domain_get(struct efc_lport *efc, u16 fcfi);
1290 extern enum efct_hw_rtn_e
1291 efct_hw_node_alloc(struct efc_lport *efc, struct efc_remote_node_s *rnode,
1292 u32 fc_addr, struct efc_sli_port_s *sport);
1293 extern enum efct_hw_rtn_e
1294 efct_hw_node_free_all(struct efct_hw_s *hw);
1295 extern enum efct_hw_rtn_e
1296 efct_hw_node_attach(struct efc_lport *efc, struct efc_remote_node_s *rnode,
1297 struct efc_dma_s *sparms);
1298 extern enum efct_hw_rtn_e
1299 efct_hw_node_detach(struct efc_lport *efc, struct efc_remote_node_s *rnode);
1300 extern enum efct_hw_rtn_e
1301 efct_hw_node_free_resources(struct efc_lport *efc,
1302 struct efc_remote_node_s *rnode);
1303 struct efct_hw_io_s *efct_hw_io_alloc(struct efct_hw_s *hw);
1304 extern struct efct_hw_io_s
1305 *efct_hw_io_activate_port_owned(struct efct_hw_s *hw, struct efct_hw_io_s *io);
1306 int efct_hw_io_free(struct efct_hw_s *hw, struct efct_hw_io_s *io);
1307 u8 efct_hw_io_inuse(struct efct_hw_s *hw, struct efct_hw_io_s *io);
1308 typedef int(*efct_hw_srrs_cb_t)(struct efct_hw_io_s *io,
1309 struct efc_remote_node_s *rnode, u32 length,
1310 int status, u32 ext_status, void *arg);
1312 extern enum efct_hw_rtn_e
1313 efct_hw_srrs_send(struct efct_hw_s *hw, enum efct_hw_io_type_e type,
1314 struct efct_hw_io_s *io,
1315 struct efc_dma_s *send, u32 len,
1316 struct efc_dma_s *receive, struct efc_remote_node_s *rnode,
1317 union efct_hw_io_param_u *iparam,
1318 efct_hw_srrs_cb_t cb,
1319 void *arg);
1320 extern enum efct_hw_rtn_e
1321 efct_hw_io_send(struct efct_hw_s *hw, enum efct_hw_io_type_e type,
1322 struct efct_hw_io_s *io, u32 len,
1323 union efct_hw_io_param_u *iparam,
1324 struct efc_remote_node_s *rnode, void *cb, void *arg);
1325 extern enum efct_hw_rtn_e
1326 efct_hw_io_register_sgl(struct efct_hw_s *hw, struct efct_hw_io_s *io,
1327 struct efc_dma_s *sgl,
1328 u32 sgl_count);
1329 extern enum efct_hw_rtn_e
1330 efct_hw_io_init_sges(struct efct_hw_s *hw,
1331 struct efct_hw_io_s *io, enum efct_hw_io_type_e type);
1332 extern enum efct_hw_rtn_e
1333 efct_hw_io_add_seed_sge(struct efct_hw_s *hw, struct efct_hw_io_s *io,
1334 struct efct_hw_dif_info_s *dif_info);
1335 extern enum efct_hw_rtn_e
1336 efct_hw_io_add_sge(struct efct_hw_s *hw, struct efct_hw_io_s *io,
1337 uintptr_t addr, u32 length);
1338 extern enum efct_hw_rtn_e
1339 efct_hw_io_add_dif_sge(struct efct_hw_s *hw, struct efct_hw_io_s *io,
1340 uintptr_t addr);
1341 extern enum efct_hw_rtn_e
1342 efct_hw_io_abort(struct efct_hw_s *hw, struct efct_hw_io_s *io_to_abort,
1343 bool send_abts, void *cb, void *arg);
1344 extern int
1345 efct_hw_io_get_xid(struct efct_hw_s *hw, struct efct_hw_io_s *io);
1346 extern u32
1347 efct_hw_io_get_count(struct efct_hw_s *hw,
1348 enum efct_hw_io_count_type_e io_count_type);
1349 extern u32
1350 efct_hw_get_rqes_produced_count(struct efct_hw_s *hw);
1352 extern enum efct_hw_rtn_e
1353 efct_hw_firmware_write(struct efct_hw_s *hw, struct efc_dma_s *dma,
1354 u32 size, u32 offset, int last,
1355 void (*cb)(int status, u32 bytes_written,
1356 u32 change_status, void *arg),
1357 void *arg);
1359 /* Function for retrieving SFP data */
1360 extern enum efct_hw_rtn_e
1361 efct_hw_get_sfp(struct efct_hw_s *hw, u16 page,
1362 void (*cb)(int, u32, u32 *, void *), void *arg);
1364 /* Function for retrieving temperature data */
1365 extern enum efct_hw_rtn_e
1366 efct_hw_get_temperature(struct efct_hw_s *hw,
1367 void (*efct_hw_temp_cb_t)(int status,
1368 u32 curr_temp,
1369 u32 crit_temp_thrshld,
1370 u32 warn_temp_thrshld,
1371 u32 norm_temp_thrshld,
1372 u32 fan_off_thrshld,
1373 u32 fan_on_thrshld,
1374 void *arg),
1375 void *arg);
1377 /* Function for retrieving link statistics */
1378 extern enum efct_hw_rtn_e
1379 efct_hw_get_link_stats(struct efct_hw_s *hw,
1380 u8 req_ext_counters,
1381 u8 clear_overflow_flags,
1382 u8 clear_all_counters,
1383 void (*efct_hw_link_stat_cb_t)(int status,
1384 u32 num_counters,
1385 struct efct_hw_link_stat_counts_s *counters,
1386 void *arg),
1387 void *arg);
1388 /* Function for retrieving host statistics */
1389 extern enum efct_hw_rtn_e
1390 efct_hw_get_host_stats(struct efct_hw_s *hw,
1391 u8 cc,
1392 void (*efct_hw_host_stat_cb_t)(int status,
1393 u32 num_counters,
1394 struct efct_hw_host_stat_counts_s *counters,
1395 void *arg),
1396 void *arg);
1398 enum efct_hw_rtn_e efct_hw_raise_ue(struct efct_hw_s *hw, u8 dump);
1399 extern enum efct_hw_rtn_e
1400 efct_hw_dump_get(struct efct_hw_s *hw, struct efc_dma_s *dma, u32 size,
1401 u32 offset,
1402 void (*cb)(int status, u32 bytes_read,
1403 u8 eof, void *arg),
1404 void *arg);
1405 extern enum efct_hw_rtn_e
1406 efct_hw_set_dump_location(struct efct_hw_s *hw, u32 num_buffers,
1407 struct efc_dma_s *dump_buffers, u8 fdb);
1409 extern enum efct_hw_rtn_e
1410 efct_hw_get_port_protocol(struct efct_hw_s *hw, u32 pci_func,
1411 void (*mgmt_cb)(int status,
1412 enum efct_hw_port_protocol_e
1413 port_protocol,
1414 void *arg),
1415 void *ul_arg);
1416 extern enum efct_hw_rtn_e
1417 efct_hw_set_port_protocol(struct efct_hw_s *hw,
1418 enum efct_hw_port_protocol_e profile,
1419 u32 pci_func,
1420 void (*mgmt_cb)(int status, void *arg),
1421 void *ul_arg);
1423 extern enum efct_hw_rtn_e
1424 efct_hw_get_profile_list(struct efct_hw_s *hw,
1425 void (*mgmt_cb)(int status,
1426 struct efct_hw_profile_list_s *,
1427 void *arg),
1428 void *arg);
1429 extern enum efct_hw_rtn_e
1430 efct_hw_get_active_profile(struct efct_hw_s *hw,
1431 void (*mgmt_cb)(int status,
1432 u32 active_profile, void *arg),
1433 void *arg);
1434 extern enum efct_hw_rtn_e
1435 efct_hw_set_active_profile(struct efct_hw_s *hw,
1436 void (*mgmt_cb)(int status, void *arg),
1437 u32 profile_id, void *arg);
1438 extern enum efct_hw_rtn_e
1439 efct_hw_get_nvparms(struct efct_hw_s *hw,
1440 void (*mgmt_cb)(int status, u8 *wwpn,
1441 u8 *wwnn, u8 hard_alpa,
1442 u32 preferred_d_id, void *arg),
1443 void *arg);
1444 extern
1445 enum efct_hw_rtn_e efct_hw_set_nvparms(struct efct_hw_s *hw,
1446 void (*mgmt_cb)(int status, void *arg),
1447 u8 *wwpn, u8 *wwnn, u8 hard_alpa,
1448 u32 preferred_d_id, void *arg);
1449 extern int
1450 efct_hw_eq_process(struct efct_hw_s *hw, struct hw_eq_s *eq,
1451 u32 max_isr_time_msec);
1452 void efct_hw_cq_process(struct efct_hw_s *hw, struct hw_cq_s *cq);
1453 extern void
1454 efct_hw_wq_process(struct efct_hw_s *hw, struct hw_cq_s *cq,
1455 u8 *cqe, int status, u16 rid);
1456 extern void
1457 efct_hw_xabt_process(struct efct_hw_s *hw, struct hw_cq_s *cq,
1458 u8 *cqe, u16 rid);
1459 int efct_hw_wq_write(struct hw_wq_s *wq, struct efct_hw_wqe_s *wqe);
1461 extern enum efct_hw_rtn_e
1462 efct_hw_dump_clear(struct efct_hw_s *hw,
1463 void (*cb)(int status, void *arg),
1464 void *arg);
1466 extern u8
1467 efct_hw_is_io_port_owned(struct efct_hw_s *hw, struct efct_hw_io_s *io);
1469 extern bool
1470 efct_hw_is_xri_port_owned(struct efct_hw_s *hw, u32 xri);
1471 extern struct efct_hw_io_s
1472 *efct_hw_io_lookup(struct efct_hw_s *hw, u32 indicator);
1473 extern u32
1474 efct_hw_xri_move_to_port_owned(struct efct_hw_s *hw,
1475 u32 num_xri);
1476 extern enum
1477 efct_hw_rtn_e efct_hw_xri_move_to_host_owned(struct efct_hw_s *hw,
1478 u8 num_xri);
1479 extern int
1480 efct_hw_reque_xri(struct efct_hw_s *hw, struct efct_hw_io_s *io);
1482 struct efct_hw_send_frame_context_s {
1483 /* structure elements used by HW */
1484 struct efct_hw_s *hw; /**> pointer to HW */
1485 struct hw_wq_callback_s *wqcb; /**> WQ callback object, request tag */
1486 struct efct_hw_wqe_s wqe; /* > WQE buf obj(may be queued
1487 * on WQ pending list)
1489 void (*callback)(int status, void *arg); /* > final
1490 * callback function
1492 void *arg; /**> final callback argument */
1494 /* General purpose elements */
1495 struct efc_hw_sequence_s *seq;
1496 struct efc_dma_s payload; /**> a payload DMA buffer */
1499 #define EFCT_HW_OBJECT_G5 0xfeaa0001
1500 #define EFCT_HW_OBJECT_G6 0xfeaa0003
1501 #define EFCT_FILE_TYPE_GROUP 0xf7
1502 #define EFCT_FILE_ID_GROUP 0xa2
1503 struct efct_hw_grp_hdr {
1504 u32 size;
1505 __be32 magic_number;
1506 u32 word2;
1507 u8 rev_name[128];
1508 u8 date[12];
1509 u8 revision[32];
1512 enum efct_hw_rtn_e
1513 efct_hw_send_frame(struct efct_hw_s *hw, struct fc_header_le_s *hdr,
1514 u8 sof, u8 eof, struct efc_dma_s *payload,
1515 struct efct_hw_send_frame_context_s *ctx,
1516 void (*callback)(void *arg, u8 *cqe, int status),
1517 void *arg);
1519 /* RQ completion handlers for RQ pair mode */
1520 extern int
1521 efct_hw_rqpair_process_rq(struct efct_hw_s *hw,
1522 struct hw_cq_s *cq, u8 *cqe);
1523 extern
1524 enum efct_hw_rtn_e efct_hw_rqpair_sequence_free(struct efct_hw_s *hw,
1525 struct efc_hw_sequence_s *seq);
1526 extern int
1527 efct_hw_rqpair_process_auto_xfr_rdy_cmd(struct efct_hw_s *hw,
1528 struct hw_cq_s *cq, u8 *cqe);
1529 extern int
1530 efct_hw_rqpair_process_auto_xfr_rdy_data(struct efct_hw_s *hw,
1531 struct hw_cq_s *cq, u8 *cqe);
1532 extern enum efct_hw_rtn_e
1533 efct_hw_rqpair_init(struct efct_hw_s *hw);
1534 extern enum efct_hw_rtn_e
1535 efct_hw_rqpair_auto_xfer_rdy_buffer_alloc(struct efct_hw_s *hw,
1536 u32 num_buffers);
1537 extern u8
1538 efct_hw_rqpair_auto_xfer_rdy_buffer_post(struct efct_hw_s *hw,
1539 struct efct_hw_io_s *io,
1540 bool reuse_buf);
1541 extern enum efct_hw_rtn_e
1542 efct_hw_rqpair_auto_xfer_rdy_move_to_port(struct efct_hw_s *hw,
1543 struct efct_hw_io_s *io);
1544 extern void
1545 efct_hw_rqpair_auto_xfer_rdy_move_to_host(struct efct_hw_s *hw,
1546 struct efct_hw_io_s *io);
1547 void efct_hw_rqpair_teardown(struct efct_hw_s *hw);
1548 void efct_hw_io_abort_all(struct efct_hw_s *hw);
1550 enum efct_hw_rtn_e efct_hw_rx_allocate(struct efct_hw_s *hw);
1551 enum efct_hw_rtn_e efct_hw_rx_post(struct efct_hw_s *hw);
1552 void efct_hw_rx_free(struct efct_hw_s *hw);
1554 typedef int
1555 (*efct_hw_async_cb_t)(struct efct_hw_s *hw,
1556 int status, u8 *mqe, void *arg);
1557 extern int
1558 efct_hw_async_call(struct efct_hw_s *hw,
1559 efct_hw_async_cb_t callback, void *arg);
1561 static inline void
1562 efct_hw_sequence_copy(struct efc_hw_sequence_s *dst,
1563 struct efc_hw_sequence_s *src)
1565 /* Copy src to dst, then zero out the linked list link */
1566 *dst = *src;
1569 static inline enum efct_hw_rtn_e
1570 efct_hw_sequence_free(struct efct_hw_s *hw, struct efc_hw_sequence_s *seq)
1572 /* Only RQ pair mode is supported */
1573 return efct_hw_rqpair_sequence_free(hw, seq);
1576 /* HW WQ request tag API */
1577 enum efct_hw_rtn_e efct_hw_reqtag_init(struct efct_hw_s *hw);
1578 extern struct hw_wq_callback_s
1579 *efct_hw_reqtag_alloc(struct efct_hw_s *hw,
1580 void (*callback)(void *arg, u8 *cqe,
1581 int status), void *arg);
1582 extern void
1583 efct_hw_reqtag_free(struct efct_hw_s *hw, struct hw_wq_callback_s *wqcb);
1584 extern struct hw_wq_callback_s
1585 *efct_hw_reqtag_get_instance(struct efct_hw_s *hw, u32 instance_index);
1586 void efct_hw_reqtag_reset(struct efct_hw_s *hw);
1588 void efct_hw_io_free_internal(struct kref *arg);
1589 void efct_hw_io_free_port_owned(struct kref *arg);
1590 #define CPUTRACE(efct, str)
1592 void efct_hw_workaround_setup(struct efct_hw_s *hw);
1594 extern uint64_t
1595 efct_get_wwn(struct efct_hw_s *hw, enum efct_hw_property_e prop);
1597 #endif /* __EFCT_H__ */