/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2009-2013  LSI Corporation
 * Copyright (c) 2013-2016  Avago Technologies
 * Copyright (c) 2016-2018  Broadcom Inc.
 *
 * FILE: megaraid_sas_fusion.h
 *
 * Authors: Broadcom Inc.
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */
#ifndef _MEGARAID_SAS_FUSION_H_
#define _MEGARAID_SAS_FUSION_H_

#define MEGASAS_CHAIN_FRAME_SZ_MIN 1024
#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
#define MEGASAS_MAX_CHAIN_SHIFT 5
#define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000
#define MEGASAS_MAX_CHAIN_SIZE_MASK 0x3E0
#define MEGASAS_256K_IO 128
#define MEGASAS_1MB_IO (MEGASAS_256K_IO * 4)
#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
#define MEGASAS_LOAD_BALANCE_FLAG 0x1
#define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1
#define HOST_DIAG_WRITE_ENABLE 0x80
#define HOST_DIAG_RESET_ADAPTER 0x4
#define MEGASAS_FUSION_MAX_RESET_TRIES 3
#define MAX_MSIX_QUEUES_FUSION 128
#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
#define RDPQ_MAX_CHUNK_COUNT (MAX_MSIX_QUEUES_FUSION / RDPQ_MAX_INDEX_IN_ONE_CHUNK)
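/*
 * With the values above, the reply descriptor post queues for the 128
 * possible MSI-X vectors are carved into chunks of 16 queues each, so
 * RDPQ_MAX_CHUNK_COUNT works out to 128 / 16 = 8.
 */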
#define MPI2_TYPE_CUDA 0x2
#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
#define MR_RL_WRITE_THROUGH_MODE 0x00
#define MR_RL_WRITE_BACK_MODE 0x01

#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
#define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9
#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB
#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60

#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C)
#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)

#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
enum MR_RAID_FLAGS_IO_SUB_TYPE {
	MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
	MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4,
	MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
	MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7,
	MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD = 8
};

/*
 * Request descriptor types
 */
#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1
#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1
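/*
 * Illustrative sketch (helper name is hypothetical, not part of the driver):
 * one of the descriptor types above is shifted into place with
 * MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT to form a request descriptor's
 * RequestFlags byte.
 */
static inline u8 megasas_sketch_ldio_request_flags(void)
{
	return MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO <<
		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT;
}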
#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
#define MEGASAS_REDUCE_QD_COUNT 64
#define IOC_INIT_FRAME_SIZE 4096
/*
 * Raid Context structure which describes MegaRAID specific IO Parameters
 * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
 */

struct RAID_CONTEXT {
#if defined(__BIG_ENDIAN_BITFIELD)
	__le16		timeout_value;
	__le16		virtual_disk_tgt_id;
	__le64		reg_lock_row_lba;
	__le32		reg_lock_length;
	__le16		config_seq_num;
/*
 * Raid Context structure which describes Ventura MegaRAID specific
 * IO Parameters. This resides at offset 0x60 where the SGL normally
 * starts in MPT IO Frames
 */
struct RAID_CONTEXT_G35 {
#define RAID_CONTEXT_NSEG_MASK	0x00F0
#define RAID_CONTEXT_NSEG_SHIFT	4
#define RAID_CONTEXT_TYPE_MASK	0x000F
#define RAID_CONTEXT_TYPE_SHIFT	0
	u16		timeout_value;		/* 0x02 - 0x03 */
	u16		routing_flags;		/* 0x04 - 0x05 routing flags */
	u16		virtual_disk_tgt_id;	/* 0x06 - 0x07 */
	__le64		reg_lock_row_lba;	/* 0x08 - 0x0F */
	u32		reg_lock_length;	/* 0x10 - 0x13 */
	union {				/* flow specific */
		u16	rmw_op_index;	/* 0x14 - 0x15, R5/6 RMW: rmw operation index */
		u16	peer_smid;	/* 0x14 - 0x15, R1 Write: peer smid */
		u16	r56_arm_map;	/* 0x14 - 0x15, Unused [15], LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
	} flow_specific;
	u8		ex_status;	/* 0x16 : OUT */
	u8		status;		/* 0x17 status */
	u8		raid_flags;	/* 0x18 resvd[7:6], ioSubType[5:4],
					 * resvd[3:1], preferredCpu[0]
					 */
	u8		span_arm;	/* 0x1C span[7:5], arm[4:0] */
	u16		config_seq_num;	/* 0x1A - 0x1B */
	/*
	 * ---------------------------------
	 * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
	 * ---------------------------------
	 * Byte0 |    numSGE[7]- numSGE[0]  |
	 * ---------------------------------
	 * Byte1 |SD | resvd  | numSGE 8-11 |
	 * ---------------------------------
	 */
#define NUM_SGE_MASK_LOWER	0xFF
#define NUM_SGE_MASK_UPPER	0x0F
#define NUM_SGE_SHIFT_UPPER	8
#define STREAM_DETECT_SHIFT	7
#define STREAM_DETECT_MASK	0x80
#if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */
	u16		stream_detected:1;
#else
	u16		stream_detected:1;
#endif
	u8		resvd2[2];	/* 0x1E-0x1F */
};
#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT	1
#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT	2
#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT	3
#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT	4
#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT	5
#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT	6
#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT	7
#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT	8
#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK	0x0F00
#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT	12
#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK	0xF000
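/*
 * Illustrative sketch (helper name is hypothetical, not part of the driver):
 * single-bit routing features are enabled by shifting 1 into the bit
 * positions above, while the CPU-select value occupies the four bits covered
 * by MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK.
 */
static inline void megasas_sketch_set_routing(struct RAID_CONTEXT_G35 *rctx,
					      u8 cpu_sel)
{
	u16 flags = rctx->routing_flags;

	flags |= 1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT; /* seq num enable */
	flags &= ~MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK;
	flags |= (cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT) &
		  MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK;
	rctx->routing_flags = flags;
}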
static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35,
			       u16 sge_count)
{
	rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER);
	rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER)
							& NUM_SGE_MASK_UPPER);
}

static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35)
{
	u16 sge_count;

	sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER)
			<< NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0]));
	return sge_count;
}
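/*
 * Usage sketch (illustrative values, assuming u.bytes[1] starts out zero):
 * the 12-bit SGE count is split across u.bytes[0] (low eight bits) and the
 * low nibble of u.bytes[1], so after set_num_sge(&rctx_g35, 0x123) the two
 * bytes hold 0x23 and 0x01, and get_num_sge(&rctx_g35) returns 0x123 again.
 */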
#define SET_STREAM_DETECTED(rctx_g35) \
	(rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK)
#define CLEAR_STREAM_DETECTED(rctx_g35) \
	(rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK))

static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35)
{
	return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK));
}
union RAID_CONTEXT_UNION {
	struct RAID_CONTEXT raid_context;
	struct RAID_CONTEXT_G35 raid_context_g35;
};
#define RAID_CTX_SPANARM_ARM_SHIFT	(0)
#define RAID_CTX_SPANARM_ARM_MASK	(0x1f)
#define RAID_CTX_SPANARM_SPAN_SHIFT	(5)
#define RAID_CTX_SPANARM_SPAN_MASK	(0xE0)
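/*
 * Illustrative sketch (helper names are hypothetical, not part of the
 * driver): unpacking the span_arm byte, span[7:5] and arm[4:0], with the
 * masks above.
 */
static inline u8 megasas_sketch_spanarm_span(u8 span_arm)
{
	return (span_arm & RAID_CTX_SPANARM_SPAN_MASK) >>
		RAID_CTX_SPANARM_SPAN_SHIFT;
}

static inline u8 megasas_sketch_spanarm_arm(u8 span_arm)
{
	return (span_arm & RAID_CTX_SPANARM_ARM_MASK) >>
		RAID_CTX_SPANARM_ARM_SHIFT;
}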
/* LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
#define RAID_CTX_R56_Q_ARM_MASK		(0x1F)
#define RAID_CTX_R56_P_ARM_SHIFT	(5)
#define RAID_CTX_R56_P_ARM_MASK		(0x3E0)
#define RAID_CTX_R56_LOG_ARM_SHIFT	(10)
#define RAID_CTX_R56_LOG_ARM_MASK	(0x7C00)
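/*
 * Illustrative sketch (helper name is hypothetical): splitting the 16-bit
 * r56_arm_map field of RAID_CONTEXT_G35 into its Q-Arm, P-Arm and LogArm
 * components using the masks above.
 */
static inline void megasas_sketch_r56_arm_map(u16 arm_map, u8 *q_arm,
					      u8 *p_arm, u8 *log_arm)
{
	*q_arm   = arm_map & RAID_CTX_R56_Q_ARM_MASK;
	*p_arm   = (arm_map & RAID_CTX_R56_P_ARM_MASK) >>
			RAID_CTX_R56_P_ARM_SHIFT;
	*log_arm = (arm_map & RAID_CTX_R56_LOG_ARM_MASK) >>
			RAID_CTX_R56_LOG_ARM_SHIFT;
}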
/* number of bits per index in U32 TrackStream */
#define BITS_PER_INDEX_STREAM		4
#define INVALID_STREAM_NUM		16
#define MR_STREAM_BITMAP		0x76543210
#define STREAM_MASK			((1 << BITS_PER_INDEX_STREAM) - 1)
#define ZERO_LAST_STREAM		0x0fffffff
#define MAX_STREAMS_TRACKED		8
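/*
 * Illustrative sketch (helper name is hypothetical): MR_STREAM_BITMAP packs
 * eight 4-bit stream indices into one u32 in most-recently-used order
 * (0x76543210 = streams 0..7); the i-th index is recovered by shifting and
 * masking with STREAM_MASK.
 */
static inline u8 megasas_sketch_stream_index(u32 track_stream, u8 i)
{
	return (track_stream >> (i * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
}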
/*
 * define region lock types
 */
enum REGION_TYPE {
	REGION_TYPE_UNUSED = 0,
	REGION_TYPE_SHARED_READ = 1,
	REGION_TYPE_SHARED_WRITE = 2,
	REGION_TYPE_EXCLUSIVE = 3,
};
#define MPI2_FUNCTION_IOC_INIT		(0x02) /* IOC Init */
#define MPI2_WHOINIT_HOST_DRIVER	(0x04)
#define MPI2_VERSION_MAJOR		(0x02)
#define MPI2_VERSION_MINOR		(0x00)
#define MPI2_VERSION_MAJOR_MASK		(0xFF00)
#define MPI2_VERSION_MAJOR_SHIFT	(8)
#define MPI2_VERSION_MINOR_MASK		(0x00FF)
#define MPI2_VERSION_MINOR_SHIFT	(0)
#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
			MPI2_VERSION_MINOR)
#define MPI2_HEADER_VERSION_UNIT	(0x10)
#define MPI2_HEADER_VERSION_DEV		(0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK	(0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT	(8)
#define MPI2_HEADER_VERSION_DEV_MASK	(0x00FF)
#define MPI2_HEADER_VERSION_DEV_SHIFT	(0)
#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
			MPI2_HEADER_VERSION_DEV)
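/*
 * With the values above, MPI2_VERSION works out to (0x02 << 8) | 0x00 =
 * 0x0200 and MPI2_HEADER_VERSION to (0x10 << 8) | 0x00 = 0x1000.
 */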
#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR	(0x03)
#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG	(0x8000)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG	(0x0400)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP	(0x0003)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG	(0x0200)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD	(0x0100)
#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP		(0x0004)
/* EEDP escape mode */
#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE	(0x0040)
#define MPI2_FUNCTION_SCSI_IO_REQUEST		(0x00) /* SCSI IO */
#define MPI2_FUNCTION_SCSI_TASK_MGMT		(0x01)
#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY	(0x03)
#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO		(0x06)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO		(0x00)
#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING	(0x02)
#define MPI2_SCSIIO_CONTROL_WRITE		(0x01000000)
#define MPI2_SCSIIO_CONTROL_READ		(0x02000000)
#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK	(0x0E)
#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED		(0x0F)
#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS	(0x00)
#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK	(0x0F)
#define MPI2_WRSEQ_FLUSH_KEY_VALUE		(0x0)
#define MPI2_WRITE_SEQUENCE_OFFSET		(0x00000004)
#define MPI2_WRSEQ_1ST_KEY_VALUE		(0xF)
#define MPI2_WRSEQ_2ND_KEY_VALUE		(0x4)
#define MPI2_WRSEQ_3RD_KEY_VALUE		(0xB)
#define MPI2_WRSEQ_4TH_KEY_VALUE		(0x2)
#define MPI2_WRSEQ_5TH_KEY_VALUE		(0x7)
#define MPI2_WRSEQ_6TH_KEY_VALUE		(0xD)
struct MPI25_IEEE_SGE_CHAIN64 {

struct MPI2_SGE_SIMPLE_UNION {

struct MPI2_SCSI_IO_CDB_EEDP32 {
	u8	CDB[20];			/* 0x00 */
	__be32	PrimaryReferenceTag;		/* 0x14 */
	__be16	PrimaryApplicationTag;		/* 0x18 */
	__be16	PrimaryApplicationTagMask;	/* 0x1A */
	__le32	TransferLength;			/* 0x1C */
};
struct MPI2_SGE_CHAIN_UNION {

struct MPI2_IEEE_SGE_SIMPLE32 {

struct MPI2_IEEE_SGE_CHAIN32 {

struct MPI2_IEEE_SGE_SIMPLE64 {

struct MPI2_IEEE_SGE_CHAIN64 {

union MPI2_IEEE_SGE_SIMPLE_UNION {
	struct MPI2_IEEE_SGE_SIMPLE32 Simple32;
	struct MPI2_IEEE_SGE_SIMPLE64 Simple64;
};

union MPI2_IEEE_SGE_CHAIN_UNION {
	struct MPI2_IEEE_SGE_CHAIN32 Chain32;
	struct MPI2_IEEE_SGE_CHAIN64 Chain64;
};

union MPI2_SGE_IO_UNION {
	struct MPI2_SGE_SIMPLE_UNION MpiSimple;
	struct MPI2_SGE_CHAIN_UNION MpiChain;
	union MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
	union MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
};

union MPI2_SCSI_IO_CDB_UNION {
	struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
	struct MPI2_SGE_SIMPLE_UNION SGE;
};
/****************************************************************************
*  SCSI Task Management messages
****************************************************************************/

/*SCSI Task Management Request Message */
struct MPI2_SCSI_TASK_MANAGE_REQUEST {
	u16 DevHandle;		/*0x00 */
	u8 ChainOffset;		/*0x02 */
	u8 Function;		/*0x03 */
	u8 Reserved1;		/*0x04 */
	u8 TaskType;		/*0x05 */
	u8 Reserved2;		/*0x06 */
	u8 MsgFlags;		/*0x07 */
	u16 Reserved3;		/*0x0A */
	u32 Reserved4[7];	/*0x14 */
	u16 TaskMID;		/*0x30 */
	u16 Reserved5;		/*0x32 */
};
/*SCSI Task Management Reply Message */
struct MPI2_SCSI_TASK_MANAGE_REPLY {
	u16 DevHandle;		/*0x00 */
	u8 MsgLength;		/*0x02 */
	u8 Function;		/*0x03 */
	u8 ResponseCode;	/*0x04 */
	u8 TaskType;		/*0x05 */
	u8 Reserved1;		/*0x06 */
	u8 MsgFlags;		/*0x07 */
	u16 Reserved2;		/*0x0A */
	u16 Reserved3;		/*0x0C */
	u16 IOCStatus;		/*0x0E */
	u32 IOCLogInfo;		/*0x10 */
	u32 TerminationCount;	/*0x14 */
	u32 ResponseInfo;	/*0x18 */
};
struct MR_TM_REQUEST {

/* SCSI Task Management Request Message */
struct MR_TASK_MANAGE_REQUEST {
	/* To be type casted to struct MPI2_SCSI_TASK_MANAGE_REQUEST */
	struct MR_TM_REQUEST	TmRequest;
#if defined(__BIG_ENDIAN_BITFIELD)
	struct MR_TM_REPLY	TMReply;
/* TaskType values */
#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK		(0x01)
#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET	(0x02)
#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET		(0x03)
#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET	(0x05)
#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET	(0x06)
#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK		(0x07)
#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA		(0x08)
#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET		(0x09)
#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT	(0x0A)

/* ResponseCode values */
#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE		(0x00)
#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME		(0x02)
#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED		(0x04)
#define MPI2_SCSITASKMGMT_RSP_TM_FAILED			(0x05)
#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED		(0x08)
#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN		(0x09)
#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG		(0x0A)
#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC		(0x80)
/*
 * RAID SCSI IO Request Message
 * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
 */
struct MPI2_RAID_SCSI_IO_REQUEST {
	__le16		DevHandle;			/* 0x00 */
	u8		ChainOffset;			/* 0x02 */
	u8		Function;			/* 0x03 */
	__le16		Reserved1;			/* 0x04 */
	u8		Reserved2;			/* 0x06 */
	u8		MsgFlags;			/* 0x07 */
	__le16		Reserved3;			/* 0x0A */
	__le32		SenseBufferLowAddress;		/* 0x0C */
	__le16		SGLFlags;			/* 0x10 */
	u8		SenseBufferLength;		/* 0x12 */
	u8		Reserved4;			/* 0x13 */
	u8		SGLOffset0;			/* 0x14 */
	u8		SGLOffset1;			/* 0x15 */
	u8		SGLOffset2;			/* 0x16 */
	u8		SGLOffset3;			/* 0x17 */
	__le32		SkipCount;			/* 0x18 */
	__le32		DataLength;			/* 0x1C */
	__le32		BidirectionalDataLength;	/* 0x20 */
	__le16		IoFlags;			/* 0x24 */
	__le16		EEDPFlags;			/* 0x26 */
	__le32		EEDPBlockSize;			/* 0x28 */
	__le32		SecondaryReferenceTag;		/* 0x2C */
	__le16		SecondaryApplicationTag;	/* 0x30 */
	__le16		ApplicationTagTranslationMask;	/* 0x32 */
	u8		LUN[8];				/* 0x34 */
	__le32		Control;			/* 0x3C */
	union MPI2_SCSI_IO_CDB_UNION	CDB;		/* 0x40 */
	union RAID_CONTEXT_UNION	RaidContext;	/* 0x60 */
	union {
		union MPI2_SGE_IO_UNION	SGL;		/* 0x80 */
		DECLARE_FLEX_ARRAY(union MPI2_SGE_IO_UNION, SGLs);
	};
};
/*
 * MPT RAID MFA IO Descriptor.
 */
struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
	u32	MessageAddress1:24;
/* Default Request Descriptor */
struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
	u8	RequestFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	SMID;				/* 0x02 */
	__le16	LMID;				/* 0x04 */
	__le16	DescriptorTypeDependent;	/* 0x06 */
};

/* High Priority Request Descriptor */
struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
	u8	RequestFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	SMID;				/* 0x02 */
	__le16	LMID;				/* 0x04 */
	__le16	Reserved1;			/* 0x06 */
};

/* SCSI IO Request Descriptor */
struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
	u8	RequestFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	SMID;				/* 0x02 */
	__le16	LMID;				/* 0x04 */
	__le16	DevHandle;			/* 0x06 */
};

/* SCSI Target Request Descriptor */
struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
	u8	RequestFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	SMID;				/* 0x02 */
	__le16	LMID;				/* 0x04 */
	__le16	IoIndex;			/* 0x06 */
};

/* RAID Accelerator Request Descriptor */
struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
	u8	RequestFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	SMID;				/* 0x02 */
	__le16	LMID;				/* 0x04 */
	__le16	Reserved;			/* 0x06 */
};

/* union of Request Descriptors */
union MEGASAS_REQUEST_DESCRIPTOR_UNION {
	struct MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
	struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
	struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
	struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
	struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
	struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
/* Default Reply Descriptor */
struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
	u8	ReplyFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	DescriptorTypeDependent1;	/* 0x02 */
	__le32	DescriptorTypeDependent2;	/* 0x04 */
};

/* Address Reply Descriptor */
struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
	u8	ReplyFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	SMID;				/* 0x02 */
	__le32	ReplyFrameAddress;		/* 0x04 */
};

/* SCSI IO Success Reply Descriptor */
struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
	u8	ReplyFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	SMID;				/* 0x02 */
	__le16	TaskTag;			/* 0x04 */
	__le16	Reserved1;			/* 0x06 */
};

/* TargetAssist Success Reply Descriptor */
struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
	u8	ReplyFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	SMID;				/* 0x02 */
	u8	SequenceNumber;			/* 0x04 */
	u8	Reserved1;			/* 0x05 */
	__le16	IoIndex;			/* 0x06 */
};

/* Target Command Buffer Reply Descriptor */
struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
	u8	ReplyFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	InitiatorDevHandle;		/* 0x04 */
	__le16	IoIndex;			/* 0x06 */
};

/* RAID Accelerator Success Reply Descriptor */
struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
	u8	ReplyFlags;			/* 0x00 */
	u8	MSIxIndex;			/* 0x01 */
	__le16	SMID;				/* 0x02 */
	__le32	Reserved;			/* 0x04 */
};

/* union of Reply Descriptors */
union MPI2_REPLY_DESCRIPTORS_UNION {
	struct MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
	struct MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
	struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
	struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
	struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
		RAIDAcceleratorSuccess;
/* IOCInit Request message */
struct MPI2_IOC_INIT_REQUEST {
	u8	WhoInit;				/* 0x00 */
	u8	Reserved1;				/* 0x01 */
	u8	ChainOffset;				/* 0x02 */
	u8	Function;				/* 0x03 */
	__le16	Reserved2;				/* 0x04 */
	u8	Reserved3;				/* 0x06 */
	u8	MsgFlags;				/* 0x07 */
	__le16	Reserved4;				/* 0x0A */
	__le16	MsgVersion;				/* 0x0C */
	__le16	HeaderVersion;				/* 0x0E */
	u32	Reserved5;				/* 0x10 */
	__le16	Reserved6;				/* 0x14 */
	u8	HostPageSize;				/* 0x16 */
	u8	HostMSIxVectors;			/* 0x17 */
	__le16	Reserved8;				/* 0x18 */
	__le16	SystemRequestFrameSize;			/* 0x1A */
	__le16	ReplyDescriptorPostQueueDepth;		/* 0x1C */
	__le16	ReplyFreeQueueDepth;			/* 0x1E */
	__le32	SenseBufferAddressHigh;			/* 0x20 */
	__le32	SystemReplyAddressHigh;			/* 0x24 */
	__le64	SystemRequestFrameBaseAddress;		/* 0x28 */
	__le64	ReplyDescriptorPostQueueAddress;	/* 0x30 */
	__le64	ReplyFreeQueueAddress;			/* 0x38 */
	__le64	TimeStamp;				/* 0x40 */
};
#define MR_PD_INVALID 0xFFFF
#define MR_DEVHANDLE_INVALID 0xFFFF
#define MAX_SPAN_DEPTH 8
#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
#define MAX_ROW_SIZE 32
#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
#define MAX_LOGICAL_DRIVES 64
#define MAX_LOGICAL_DRIVES_EXT 256
#define MAX_LOGICAL_DRIVES_DYN 512
#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
#define MAX_ARRAYS 128
#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
#define MAX_ARRAYS_EXT 256
#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
#define MAX_API_ARRAYS_DYN 512
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
#define MR_DCMD_DRV_GET_TARGET_PROP 0x0200e103
#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc */
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
#define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 0x01200100
#define MR_DCMD_CTRL_DEVICE_LIST_GET 0x01190600
struct MR_DEV_HANDLE_INFO {

struct MR_ARRAY_INFO {
	__le16	pd[MAX_RAIDMAP_ROW_SIZE];
};

struct MR_QUAD_ELEMENT {

struct MR_SPAN_INFO {
	struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
};

struct MR_SPAN_BLOCK_INFO {
	struct MR_LD_SPAN span;
	struct MR_SPAN_INFO block_span_info;
};
#define MR_RAID_CTX_CPUSEL_0		0
#define MR_RAID_CTX_CPUSEL_1		1
#define MR_RAID_CTX_CPUSEL_2		2
#define MR_RAID_CTX_CPUSEL_3		3
#define MR_RAID_CTX_CPUSEL_FCFS		0xF

struct MR_CPU_AFFINITY_MASK {
#ifndef __BIG_ENDIAN_BITFIELD

struct MR_IO_AFFINITY {
	struct MR_CPU_AFFINITY_MASK pdRead;
	struct MR_CPU_AFFINITY_MASK pdWrite;
	struct MR_CPU_AFFINITY_MASK ldRead;
	struct MR_CPU_AFFINITY_MASK ldWrite;
	u8 maxCores;	/* Total cores + HW Path in ROC */
#if defined(__BIG_ENDIAN_BITFIELD)
	u32	fp_cache_bypass_capable:1;
	u32	fp_rmw_capable:1;
	u32	disable_coalescing:1;
	u32	fpBypassRegionLock:1;
	u32	fpNonRWCapable:1;
	u32	fpReadAcrossStripe:1;
	u32	fpWriteAcrossStripe:1;
	u32	fpWriteCapable:1;
	u32	encryptionType:8;
#else
	u32	encryptionType:8;
	u32	fpWriteCapable:1;
	u32	fpWriteAcrossStripe:1;
	u32	fpReadAcrossStripe:1;
	u32	fpNonRWCapable:1;
	u32	fpBypassRegionLock:1;
	u32	disable_coalescing:1;
	u32	fp_rmw_capable:1;
	u32	fp_cache_bypass_capable:1;
#endif
	u8	regTypeReqOnWrite;
#ifndef __BIG_ENDIAN_BITFIELD
	u32	ldSyncRequired:1;
	u32	regTypeReqOnReadIsValid:1;
	u32	enableSLDOnAllRWIOs:1;
#else
	u32	enableSLDOnAllRWIOs:1;
	u32	regTypeReqOnReadIsValid:1;
	u32	ldSyncRequired:1;
#endif

	u8	LUN[8];			/* 0x24 8 byte LUN field used for SCSI IO's */
	u8	fpIoTimeoutForLd;	/* 0x2C timeout value used by driver in FP IO */
	/* 0x2D This LD accept priority boost of this type */
	u8	ld_accept_priority_type;
	u8	reserved2[2];		/* 0x2E - 0x2F */
	/* 0x30 - 0x33, Logical block size for the LD */
	u32	logical_block_length;
#ifndef __BIG_ENDIAN_BITFIELD
	/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
	/* 0x34, LOGICAL BLOCKS PER PHYSICAL
	 * BLOCK EXPONENT from READ CAPACITY 16
	 */
	u32	ld_logical_block_exp:4;
	u32	reserved1:24;		/* 0x34 */
#else
	u32	reserved1:24;		/* 0x34 */
	/* 0x34, LOGICAL BLOCKS PER PHYSICAL
	 * BLOCK EXPONENT from READ CAPACITY 16
	 */
	u32	ld_logical_block_exp:4;
	/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
#endif

	/* 0x38 - 0x3f, This will determine which
	 * core will process LD IO and PD IO.
	 */
	struct MR_IO_AFFINITY cpuAffinity;
	/* Bit definitions are specified by MR_IO_AFFINITY */
	u8	reserved3[0x80 - 0x40];	/* 0x40 - 0x7f */
};
struct MR_LD_SPAN_MAP {
	struct MR_LD_RAID ldRaid;
	u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE];
	struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
};

struct MR_FW_RAID_MAP {
	u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES +
		       MAX_RAIDMAP_VIEWS];
	struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
	struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
	struct MR_LD_SPAN_MAP ldSpanMap[];
};
struct IO_REQUEST_INFO {
	u8 span_arm;	/* span[7:5], arm[4:0] */
	u16 r1_alt_dev_handle; /* raid 1/10 only */

struct MR_LD_TARGET_SYNC {
/*
 * RAID Map descriptor Types.
 * Each element should uniquely identify one data structure in the RAID map
 */
enum MR_RAID_MAP_DESC_TYPE {
	/* MR_DEV_HANDLE_INFO data */
	RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0,
	/* target to Ld num Index map */
	RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1,
	/* MR_ARRAY_INFO data */
	RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2,
	/* MR_LD_SPAN_MAP data */
	RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3,
	RAID_MAP_DESC_TYPE_COUNT,
};
/*
 * This table defines the offset, size and num elements of each descriptor
 * type in the RAID Map buffer
 */
struct MR_RAID_MAP_DESC_TABLE {
	/* Raid map descriptor type */
	u32 raid_map_desc_type;
	/* Offset into the RAID map buffer where
	 * descriptor data is saved
	 */
	u32 raid_map_desc_offset;
	/* total size of the
	 * descriptor buffer
	 */
	u32 raid_map_desc_buffer_size;
	/* Number of elements contained in the
	 * descriptor buffer
	 */
	u32 raid_map_desc_elements;
};
/*
 * Dynamic Raid Map Structure.
 */
struct MR_FW_RAID_MAP_DYNAMIC {
	u32 raid_map_size;	/* total size of RAID Map structure */
	u32 desc_table_offset;	/* Offset of desc table into RAID map */
	u32 desc_table_size;	/* Total Size of desc table */
	/* Total Number of elements in the desc table */
	u32 desc_table_num_elements;
	u32 reserved2[3];	/* future use */
	/* timeout value used by driver in FP IOs */
	u8 fp_pd_io_timeout_sec;
	/* when this seqNum increments, driver needs to
	 * release RMW buffers asap
	 */
	u16 ld_count;	/* count of lds. */
	u16 ar_count;	/* count of arrays */
	u16 span_count;	/* count of spans */

	/*
	 * The below structure of pointers is only to be used by the driver.
	 * This is added in the API to reduce the amount of code changes
	 * needed in the driver to support dynamic RAID map. Firmware should
	 * not update these pointers while preparing the raid map
	 */
	struct MR_DEV_HANDLE_INFO *dev_hndl_info;
	u16 *ld_tgt_id_to_ld;
	struct MR_ARRAY_INFO *ar_map_info;
	struct MR_LD_SPAN_MAP *ld_span_map;
	u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];

	/*
	 * RAID Map descriptor table defines the layout of data in the RAID Map.
	 * The size of the descriptor table itself could change.
	 */
	/* Variable Size descriptor Table. */
	struct MR_RAID_MAP_DESC_TABLE
		raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
	/* Variable Size buffer containing all data */
	u32 raid_map_desc_data[];
}; /* Dynamically sized RAID Map structure */
#define IEEE_SGE_FLAGS_ADDR_MASK		(0x03)
#define IEEE_SGE_FLAGS_SYSTEM_ADDR		(0x00)
#define IEEE_SGE_FLAGS_IOCDDR_ADDR		(0x01)
#define IEEE_SGE_FLAGS_IOCPLB_ADDR		(0x02)
#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR		(0x03)
#define IEEE_SGE_FLAGS_CHAIN_ELEMENT		(0x80)
#define IEEE_SGE_FLAGS_END_OF_LIST		(0x40)

#define MPI2_SGE_FLAGS_SHIFT			(0x02)
#define IEEE_SGE_FLAGS_FORMAT_MASK		(0xC0)
#define IEEE_SGE_FLAGS_FORMAT_IEEE		(0x00)
#define IEEE_SGE_FLAGS_FORMAT_NVME		(0x02)

#define MPI26_IEEE_SGE_FLAGS_NSF_MASK		(0x1C)
#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE	(0x00)
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP	(0x08)
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL	(0x10)
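/*
 * Illustrative sketch (helper name is hypothetical, not the driver's exact
 * policy): an IEEE SGE that points at a chain frame carries the
 * chain-element bit OR'd with one of the addressing modes above.
 */
static inline u8 megasas_sketch_chain_sge_flags(void)
{
	return IEEE_SGE_FLAGS_CHAIN_ELEMENT | IEEE_SGE_FLAGS_SYSTEM_ADDR;
}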
#define MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME 15
#define MEGASAS_MAX_SNAP_DUMP_WAIT_TIME 60

struct megasas_register_set;
struct megasas_instance;
struct megasas_cmd_fusion {
	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
	dma_addr_t io_request_phys_addr;
	union MPI2_SGE_IO_UNION *sg_frame;
	dma_addr_t sg_frame_phys_addr;
	dma_addr_t sense_phys_addr;
	struct list_head list;
	struct scsi_cmnd *scmd;
	struct megasas_instance *instance;
	u8 retry_for_fw_reset;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc;

	/*
	 * Context for a MFI frame.
	 * Used to get the mfi cmd from list when a MFI cmd is completed
	 */
	struct completion done;
	u16 r1_alt_dev_handle;	/* raid 1/10 only */
	bool cmd_completed;	/* raid 1/10 fp writes status holder */
};
struct LD_LOAD_BALANCE_INFO {
	atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
	u64 last_accessed_block[MAX_PHYSICAL_DEVICES];
};
/* SPAN_SET is info calculated from span info from Raid map per LD */
typedef struct _LD_SPAN_SET {
	u64 data_strip_start;
	u8 strip_offset[MAX_SPAN_DEPTH];
	u32 span_row_data_width;
} LD_SPAN_SET, *PLD_SPAN_SET;

typedef struct LOG_BLOCK_SPAN_INFO {
	LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
} LD_SPAN_INFO, *PLD_SPAN_INFO;
struct MR_FW_RAID_MAP_ALL {
	struct MR_FW_RAID_MAP raidMap;
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES];
} __attribute__ ((packed));
struct MR_DRV_RAID_MAP {
	/* total size of this structure, including this field.
	 * This field will be manipulated by driver for ext raid map,
	 * else pick the value from firmware raid map.
	 */
	__le32	maxSpanDepth;
	/* timeout value used by driver in FP IOs */
	u8	fpPdIoTimeoutSec;
	struct MR_DEV_HANDLE_INFO
		devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
	u16	ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
	struct MR_LD_SPAN_MAP ldSpanMap[];
};
/* Driver raid map size is same as raid map ext
 * MR_DRV_RAID_MAP_ALL is created to sync with old raid.
 * And it is mainly for code re-use purpose.
 */
struct MR_DRV_RAID_MAP_ALL {
	struct MR_DRV_RAID_MAP raidMap;
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN];
};
struct MR_FW_RAID_MAP_EXT {
	/* Not used in new map */
	u8 fpPdIoTimeoutSec;
	struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
	u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
};
/*
 * define MR_PD_CFG_SEQ structure for system PDs
 */
struct MR_PD_CFG_SEQ {
#if defined(__BIG_ENDIAN_BITFIELD)

struct MR_PD_CFG_SEQ_NUM_SYNC {
	struct MR_PD_CFG_SEQ seq[];
};
/* stream detection */
struct STREAM_DETECT {
	u64 next_seq_lba; /* next LBA to match sequential access */
	struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */
	struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */
	u32 count_cmds_in_stream; /* count of host commands in this stream */
	u16 num_sges_in_group; /* total number of SGEs in grouped IOs */
	u8 is_read; /* SCSI OpCode for this stream */
	u8 group_depth; /* total number of host commands in group */
	/* TRUE if cannot add any more commands to this group */
	u8 reserved[7]; /* pad to 64-bit alignment */
};
struct LD_STREAM_DETECT {
	bool write_back; /* TRUE if WB, FALSE if WT */
	bool fp_write_enabled;
	bool fp_cache_bypass_capable;
	u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indices */
	/* this is the array of stream detect structures (one per stream) */
	struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED];
};
struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
	u64 RDPQBaseAddress;

struct rdpq_alloc_detail {
	struct dma_pool *dma_pool_ptr;
	dma_addr_t pool_entry_phys;
	union MPI2_REPLY_DESCRIPTORS_UNION *pool_entry_virt;
};
struct fusion_context {
	struct megasas_cmd_fusion **cmd_list;
	dma_addr_t req_frames_desc_phys;
	u8 *req_frames_desc;

	struct dma_pool *io_request_frames_pool;
	dma_addr_t io_request_frames_phys;
	u8 *io_request_frames;

	struct dma_pool *sg_dma_pool;
	struct dma_pool *sense_dma_pool;
	dma_addr_t sense_phys_addr;

	atomic_t busy_mq_poll[MAX_MSIX_QUEUES_FUSION];

	dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
	struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
	struct dma_pool *reply_frames_desc_pool;
	struct dma_pool *reply_frames_desc_pool_align;

	u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];

	u32 request_alloc_sz;
	u32 io_frames_alloc_sz;
	struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt;
	dma_addr_t rdpq_phys;
	u16 max_sge_in_main_msg;
	u16 max_sge_in_chain;

	u8 chain_offset_io_request;
	u8 chain_offset_mfi_pthru;

	struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
	dma_addr_t ld_map_phys[2];

	/* Non dma-able memory. Driver local copy. */
	struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];

	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT];
	dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT];

	struct LD_LOAD_BALANCE_INFO *load_balance_info;
	u32 load_balance_info_pages;
	LD_SPAN_INFO *log_to_span;
	u32 log_to_span_pages;
	struct LD_STREAM_DETECT **stream_detect_by_ld;
	dma_addr_t ioc_init_request_phys;
	struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
	struct megasas_cmd *ioc_init_cmd;
	bool pcie_bw_limitation;
	bool r56_div_offload;
};
enum CMD_RET_VALUES {

struct MR_SNAPDUMP_PROPERTIES {
	u8 max_num_supported;
	u8 cur_num_supported;
	u8 trigger_min_num_sec_before_ocr;
struct megasas_debugfs_buffer {

void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
int megasas_sync_map_info(struct megasas_instance *instance);
void megasas_release_fusion(struct megasas_instance *instance);
void megasas_reset_reply_desc(struct megasas_instance *instance);
int megasas_check_mpio_paths(struct megasas_instance *instance,
			     struct scsi_cmnd *scmd);
void megasas_fusion_ocr_wq(struct work_struct *work);

#endif /* _MEGARAID_SAS_FUSION_H_ */