/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux MegaRAID driver for SAS based RAID controllers
 *
 * Copyright (c) 2009-2013  LSI Corporation
 * Copyright (c) 2013-2016  Avago Technologies
 * Copyright (c) 2016-2018  Broadcom Inc.
 *
 * FILE: megaraid_sas_fusion.h
 *
 * Authors: Broadcom Inc.
 *          Manoj Jose
 *          Sumant Patro
 *          Kashyap Desai <kashyap.desai@broadcom.com>
 *          Sumit Saxena <sumit.saxena@broadcom.com>
 *
 * Send feedback to: megaraidlinux.pdl@broadcom.com
 */

#ifndef _MEGARAID_SAS_FUSION_H_
#define _MEGARAID_SAS_FUSION_H_

/* Fusion defines */
#define MEGASAS_CHAIN_FRAME_SZ_MIN 1024
#define MFI_FUSION_ENABLE_INTERRUPT_MASK (0x00000009)
#define MEGASAS_MAX_CHAIN_SHIFT 5
#define MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK 0x400000
#define MEGASAS_MAX_CHAIN_SIZE_MASK 0x3E0
#define MEGASAS_256K_IO 128
#define MEGASAS_1MB_IO (MEGASAS_256K_IO * 4)
#define MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE 256
#define MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST 0xF0
#define MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST 0xF1
#define MEGASAS_LOAD_BALANCE_FLAG 0x1
#define MEGASAS_DCMD_MBOX_PEND_FLAG 0x1
#define HOST_DIAG_WRITE_ENABLE 0x80
#define HOST_DIAG_RESET_ADAPTER 0x4
#define MEGASAS_FUSION_MAX_RESET_TRIES 3
#define MAX_MSIX_QUEUES_FUSION 128
#define RDPQ_MAX_INDEX_IN_ONE_CHUNK 16
#define RDPQ_MAX_CHUNK_COUNT (MAX_MSIX_QUEUES_FUSION / RDPQ_MAX_INDEX_IN_ONE_CHUNK)

/* Invader defines */
#define MPI2_TYPE_CUDA 0x2
#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00
#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10
#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80
#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8
#define MR_RL_WRITE_THROUGH_MODE 0x00
#define MR_RL_WRITE_BACK_MODE 0x01

/* T10 PI defines */
#define MR_PROT_INFO_TYPE_CONTROLLER 0x8
#define MEGASAS_SCSI_VARIABLE_LENGTH_CMD 0x7f
#define MEGASAS_SCSI_SERVICE_ACTION_READ32 0x9
#define MEGASAS_SCSI_SERVICE_ACTION_WRITE32 0xB
#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60

#define MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C)
#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
/*
 * Raid context flags
 */
#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT 0x4
#define MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK 0x30
enum MR_RAID_FLAGS_IO_SUB_TYPE {
	MR_RAID_FLAGS_IO_SUB_TYPE_NONE = 0,
	MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD = 1,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_DATA = 2,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_P = 3,
	MR_RAID_FLAGS_IO_SUB_TYPE_RMW_Q = 4,
	MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS = 6,
	MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT = 7,
	MR_RAID_FLAGS_IO_SUB_TYPE_R56_DIV_OFFLOAD = 8
};
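/*
 * Illustrative example (not part of the original header): the IO sub-type
 * is packed into the raid_flags byte using the shift and mask above; a
 * sub-type that fits the two-bit mask round-trips cleanly, e.g.
 *
 *	raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD <<
 *		     MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
 *	sub_type   = (raid_flags & MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_MASK) >>
 *		     MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
 *
 * which yields MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD again.
 */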
/*
 * Request descriptor types
 */
#define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7
#define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1
#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2
#define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1

#define MEGASAS_FP_CMD_LEN 16
#define MEGASAS_FUSION_IN_RESET 0
#define RAID_1_PEER_CMDS 2
#define JBOD_MAPS_COUNT 2
#define MEGASAS_REDUCE_QD_COUNT 64
#define IOC_INIT_FRAME_SIZE 4096

/*
 * Raid Context structure which describes MegaRAID specific IO Parameters
 * This resides at offset 0x60 where the SGL normally starts in MPT IO Frames
 */
struct RAID_CONTEXT {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 nseg:4;
	u8 type:4;
#else
	u8 type:4;
	u8 nseg:4;
#endif
	u8 resvd0;
	__le16 timeout_value;
	u8 reg_lock_flags;
	u8 resvd1;
	__le16 virtual_disk_tgt_id;
	__le64 reg_lock_row_lba;
	__le32 reg_lock_length;
	__le16 next_lmid;
	u8 ex_status;
	u8 status;
	u8 raid_flags;
	u8 num_sge;
	__le16 config_seq_num;
	u8 span_arm;
	u8 priority;
	u8 num_sge_ext;
	u8 resvd2;
};
/*
 * Raid Context structure which describes ventura MegaRAID specific
 * IO Parameters. This resides at offset 0x60 where the SGL normally
 * starts in MPT IO Frames
 */
struct RAID_CONTEXT_G35 {
#define RAID_CONTEXT_NSEG_MASK 0x00F0
#define RAID_CONTEXT_NSEG_SHIFT 4
#define RAID_CONTEXT_TYPE_MASK 0x000F
#define RAID_CONTEXT_TYPE_SHIFT 0
	u16 nseg_type;
	u16 timeout_value; /* 0x02 -0x03 */
	u16 routing_flags; // 0x04 -0x05 routing flags
	u16 virtual_disk_tgt_id; /* 0x06 -0x07 */
	__le64 reg_lock_row_lba; /* 0x08 - 0x0F */
	u32 reg_lock_length; /* 0x10 - 0x13 */
	union { // flow specific
		u16 rmw_op_index; /* 0x14 - 0x15, R5/6 RMW: rmw operation index*/
		u16 peer_smid; /* 0x14 - 0x15, R1 Write: peer smid*/
		u16 r56_arm_map; /* 0x14 - 0x15, Unused [15], LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
	} flow_specific;

	u8 ex_status; /* 0x16 : OUT */
	u8 status; /* 0x17 status */
	u8 raid_flags; /* 0x18 resvd[7:6], ioSubType[5:4],
			* resvd[3:1], preferredCpu[0]
			*/
	u8 span_arm; /* 0x1C span[7:5], arm[4:0] */
	u16 config_seq_num; /* 0x1A -0x1B */
	union {
		/*
		 * Bit format:
		 *	 ---------------------------------
		 *	 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 |
		 *	 ---------------------------------
		 * Byte0 |    numSGE[7]- numSGE[0]	 |
		 *	 ---------------------------------
		 * Byte1 |SD | resvd | numSGE 8-11	 |
		 *	 ---------------------------------
		 */
#define NUM_SGE_MASK_LOWER 0xFF
#define NUM_SGE_MASK_UPPER 0x0F
#define NUM_SGE_SHIFT_UPPER 8
#define STREAM_DETECT_SHIFT 7
#define STREAM_DETECT_MASK 0x80
		struct {
#if defined(__BIG_ENDIAN_BITFIELD) /* 0x1C - 0x1D */
			u16 stream_detected:1;
			u16 reserved:3;
			u16 num_sge:12;
#else
			u16 num_sge:12;
			u16 reserved:3;
			u16 stream_detected:1;
#endif
		} bits;
		u8 bytes[2];
	} u;
	u8 resvd2[2]; /* 0x1E-0x1F */
};
#define MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT 1
#define MR_RAID_CTX_ROUTINGFLAGS_C2D_SHIFT 2
#define MR_RAID_CTX_ROUTINGFLAGS_FWD_SHIFT 3
#define MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT 4
#define MR_RAID_CTX_ROUTINGFLAGS_SBS_SHIFT 5
#define MR_RAID_CTX_ROUTINGFLAGS_RW_SHIFT 6
#define MR_RAID_CTX_ROUTINGFLAGS_LOG_SHIFT 7
#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT 8
#define MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_MASK 0x0F00
#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_SHIFT 12
#define MR_RAID_CTX_ROUTINGFLAGS_SETDIVERT_MASK 0xF000

static inline void set_num_sge(struct RAID_CONTEXT_G35 *rctx_g35,
			       u16 sge_count)
{
	rctx_g35->u.bytes[0] = (u8)(sge_count & NUM_SGE_MASK_LOWER);
	rctx_g35->u.bytes[1] |= (u8)((sge_count >> NUM_SGE_SHIFT_UPPER)
						& NUM_SGE_MASK_UPPER);
}

static inline u16 get_num_sge(struct RAID_CONTEXT_G35 *rctx_g35)
{
	u16 sge_count;

	sge_count = (u16)(((rctx_g35->u.bytes[1] & NUM_SGE_MASK_UPPER)
			<< NUM_SGE_SHIFT_UPPER) | (rctx_g35->u.bytes[0]));
	return sge_count;
}

#define SET_STREAM_DETECTED(rctx_g35) \
	(rctx_g35.u.bytes[1] |= STREAM_DETECT_MASK)

#define CLEAR_STREAM_DETECTED(rctx_g35) \
	(rctx_g35.u.bytes[1] &= ~(STREAM_DETECT_MASK))

static inline bool is_stream_detected(struct RAID_CONTEXT_G35 *rctx_g35)
{
	return ((rctx_g35->u.bytes[1] & STREAM_DETECT_MASK));
}
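/*
 * Illustrative example (not part of the original header): set_num_sge() and
 * get_num_sge() split a 12-bit SGE count across the two num_sge bytes, so a
 * round trip preserves the value, e.g. after
 *
 *	struct RAID_CONTEXT_G35 rctx = { };
 *
 *	set_num_sge(&rctx, 0x123);
 *
 * rctx.u.bytes[0] is 0x23, the low nibble of rctx.u.bytes[1] is 0x1, and
 * get_num_sge(&rctx) returns 0x123.
 */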
union RAID_CONTEXT_UNION {
	struct RAID_CONTEXT raid_context;
	struct RAID_CONTEXT_G35 raid_context_g35;
};

#define RAID_CTX_SPANARM_ARM_SHIFT (0)
#define RAID_CTX_SPANARM_ARM_MASK (0x1f)

#define RAID_CTX_SPANARM_SPAN_SHIFT (5)
#define RAID_CTX_SPANARM_SPAN_MASK (0xE0)

/* LogArm[14:10], P-Arm[9:5], Q-Arm[4:0] */
#define RAID_CTX_R56_Q_ARM_MASK (0x1F)
#define RAID_CTX_R56_P_ARM_SHIFT (5)
#define RAID_CTX_R56_P_ARM_MASK (0x3E0)
#define RAID_CTX_R56_LOG_ARM_SHIFT (10)
#define RAID_CTX_R56_LOG_ARM_MASK (0x7C00)
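/*
 * Illustrative example (not part of the original header): decoding the
 * packed arm fields with the masks above, for span_arm and for the R5/6
 * flow_specific.r56_arm_map:
 *
 *	span    = (span_arm & RAID_CTX_SPANARM_SPAN_MASK) >>
 *		  RAID_CTX_SPANARM_SPAN_SHIFT;
 *	arm     = span_arm & RAID_CTX_SPANARM_ARM_MASK;
 *
 *	q_arm   = r56_arm_map & RAID_CTX_R56_Q_ARM_MASK;
 *	p_arm   = (r56_arm_map & RAID_CTX_R56_P_ARM_MASK) >>
 *		  RAID_CTX_R56_P_ARM_SHIFT;
 *	log_arm = (r56_arm_map & RAID_CTX_R56_LOG_ARM_MASK) >>
 *		  RAID_CTX_R56_LOG_ARM_SHIFT;
 */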
/* number of bits per index in U32 TrackStream */
#define BITS_PER_INDEX_STREAM 4
#define INVALID_STREAM_NUM 16
#define MR_STREAM_BITMAP 0x76543210
#define STREAM_MASK ((1 << BITS_PER_INDEX_STREAM) - 1)
#define ZERO_LAST_STREAM 0x0fffffff
#define MAX_STREAMS_TRACKED 8
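/*
 * Illustrative note (not part of the original header): a TrackStream word
 * holds eight 4-bit stream indices (BITS_PER_INDEX_STREAM each), so
 * MR_STREAM_BITMAP (0x76543210) is the initial "7, 6, ..., 1, 0" ordering,
 * and the index in slot i could be read back as
 *
 *	stream = (track_stream >> (i * BITS_PER_INDEX_STREAM)) & STREAM_MASK;
 */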
/*
 * define region lock types
 */
enum REGION_TYPE {
	REGION_TYPE_UNUSED = 0,
	REGION_TYPE_SHARED_READ = 1,
	REGION_TYPE_SHARED_WRITE = 2,
	REGION_TYPE_EXCLUSIVE = 3,
};

/* MPI2 defines */
#define MPI2_FUNCTION_IOC_INIT (0x02) /* IOC Init */
#define MPI2_WHOINIT_HOST_DRIVER (0x04)
#define MPI2_VERSION_MAJOR (0x02)
#define MPI2_VERSION_MINOR (0x00)
#define MPI2_VERSION_MAJOR_MASK (0xFF00)
#define MPI2_VERSION_MAJOR_SHIFT (8)
#define MPI2_VERSION_MINOR_MASK (0x00FF)
#define MPI2_VERSION_MINOR_SHIFT (0)
#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
		      MPI2_VERSION_MINOR)
#define MPI2_HEADER_VERSION_UNIT (0x10)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
			     MPI2_HEADER_VERSION_DEV)
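/*
 * Illustrative note (not part of the original header): with the values
 * above, MPI2_VERSION evaluates to (0x02 << 8) | 0x00 = 0x0200 and
 * MPI2_HEADER_VERSION to (0x10 << 8) | 0x00 = 0x1000.
 */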
#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
/* EEDP escape mode */
#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) /* SCSI IO */
#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x03)
#define MPI2_REQ_DESCRIPT_FLAGS_FP_IO (0x06)
#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)

struct MPI25_IEEE_SGE_CHAIN64 {
	__le64 Address;
	__le32 Length;
	__le16 Reserved1;
	u8 NextChainOffset;
	u8 Flags;
};

struct MPI2_SGE_SIMPLE_UNION {
	__le32 FlagsLength;
	union {
		__le32 Address32;
		__le64 Address64;
	} u;
};

struct MPI2_SCSI_IO_CDB_EEDP32 {
	u8 CDB[20]; /* 0x00 */
	__be32 PrimaryReferenceTag; /* 0x14 */
	__be16 PrimaryApplicationTag; /* 0x18 */
	__be16 PrimaryApplicationTagMask; /* 0x1A */
	__le32 TransferLength; /* 0x1C */
};

struct MPI2_SGE_CHAIN_UNION {
	__le16 Length;
	u8 NextChainOffset;
	u8 Flags;
	union {
		__le32 Address32;
		__le64 Address64;
	} u;
};

struct MPI2_IEEE_SGE_SIMPLE32 {
	__le32 Address;
	__le32 FlagsLength;
};

struct MPI2_IEEE_SGE_CHAIN32 {
	__le32 Address;
	__le32 FlagsLength;
};

struct MPI2_IEEE_SGE_SIMPLE64 {
	__le64 Address;
	__le32 Length;
	__le16 Reserved1;
	u8 Reserved2;
	u8 Flags;
};

struct MPI2_IEEE_SGE_CHAIN64 {
	__le64 Address;
	__le32 Length;
	__le16 Reserved1;
	u8 Reserved2;
	u8 Flags;
};

union MPI2_IEEE_SGE_SIMPLE_UNION {
	struct MPI2_IEEE_SGE_SIMPLE32 Simple32;
	struct MPI2_IEEE_SGE_SIMPLE64 Simple64;
};

union MPI2_IEEE_SGE_CHAIN_UNION {
	struct MPI2_IEEE_SGE_CHAIN32 Chain32;
	struct MPI2_IEEE_SGE_CHAIN64 Chain64;
};

union MPI2_SGE_IO_UNION {
	struct MPI2_SGE_SIMPLE_UNION MpiSimple;
	struct MPI2_SGE_CHAIN_UNION MpiChain;
	union MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
	union MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
};

union MPI2_SCSI_IO_CDB_UNION {
	u8 CDB32[32];
	struct MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
	struct MPI2_SGE_SIMPLE_UNION SGE;
};
/****************************************************************************
*  SCSI Task Management messages
****************************************************************************/

/* SCSI Task Management Request Message */
struct MPI2_SCSI_TASK_MANAGE_REQUEST {
	u16 DevHandle; /*0x00 */
	u8 ChainOffset; /*0x02 */
	u8 Function; /*0x03 */
	u8 Reserved1; /*0x04 */
	u8 TaskType; /*0x05 */
	u8 Reserved2; /*0x06 */
	u8 MsgFlags; /*0x07 */
	u8 VP_ID; /*0x08 */
	u8 VF_ID; /*0x09 */
	u16 Reserved3; /*0x0A */
	u8 LUN[8]; /*0x0C */
	u32 Reserved4[7]; /*0x14 */
	u16 TaskMID; /*0x30 */
	u16 Reserved5; /*0x32 */
};

/* SCSI Task Management Reply Message */
struct MPI2_SCSI_TASK_MANAGE_REPLY {
	u16 DevHandle; /*0x00 */
	u8 MsgLength; /*0x02 */
	u8 Function; /*0x03 */
	u8 ResponseCode; /*0x04 */
	u8 TaskType; /*0x05 */
	u8 Reserved1; /*0x06 */
	u8 MsgFlags; /*0x07 */
	u8 VP_ID; /*0x08 */
	u8 VF_ID; /*0x09 */
	u16 Reserved2; /*0x0A */
	u16 Reserved3; /*0x0C */
	u16 IOCStatus; /*0x0E */
	u32 IOCLogInfo; /*0x10 */
	u32 TerminationCount; /*0x14 */
	u32 ResponseInfo; /*0x18 */
};

struct MR_TM_REQUEST {
	char request[128];
};

struct MR_TM_REPLY {
	char reply[128];
};

/* SCSI Task Management Request Message */
struct MR_TASK_MANAGE_REQUEST {
	/* To be typecast to struct MPI2_SCSI_TASK_MANAGE_REQUEST */
	struct MR_TM_REQUEST TmRequest;
	union {
		struct {
#if defined(__BIG_ENDIAN_BITFIELD)
			u32 reserved1:30;
			u32 isTMForPD:1;
			u32 isTMForLD:1;
#else
			u32 isTMForLD:1;
			u32 isTMForPD:1;
			u32 reserved1:30;
#endif
			u32 reserved2;
		} tmReqFlags;
		struct MR_TM_REPLY TMReply;
	};
};
/* TaskType values */

#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)

/* ResponseCode values */

#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)

/*
 * RAID SCSI IO Request Message
 * Total SGE count will be one less than _MPI2_SCSI_IO_REQUEST
 */
struct MPI2_RAID_SCSI_IO_REQUEST {
	__le16 DevHandle; /* 0x00 */
	u8 ChainOffset; /* 0x02 */
	u8 Function; /* 0x03 */
	__le16 Reserved1; /* 0x04 */
	u8 Reserved2; /* 0x06 */
	u8 MsgFlags; /* 0x07 */
	u8 VP_ID; /* 0x08 */
	u8 VF_ID; /* 0x09 */
	__le16 Reserved3; /* 0x0A */
	__le32 SenseBufferLowAddress; /* 0x0C */
	__le16 SGLFlags; /* 0x10 */
	u8 SenseBufferLength; /* 0x12 */
	u8 Reserved4; /* 0x13 */
	u8 SGLOffset0; /* 0x14 */
	u8 SGLOffset1; /* 0x15 */
	u8 SGLOffset2; /* 0x16 */
	u8 SGLOffset3; /* 0x17 */
	__le32 SkipCount; /* 0x18 */
	__le32 DataLength; /* 0x1C */
	__le32 BidirectionalDataLength; /* 0x20 */
	__le16 IoFlags; /* 0x24 */
	__le16 EEDPFlags; /* 0x26 */
	__le32 EEDPBlockSize; /* 0x28 */
	__le32 SecondaryReferenceTag; /* 0x2C */
	__le16 SecondaryApplicationTag; /* 0x30 */
	__le16 ApplicationTagTranslationMask; /* 0x32 */
	u8 LUN[8]; /* 0x34 */
	__le32 Control; /* 0x3C */
	union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
	union RAID_CONTEXT_UNION RaidContext; /* 0x60 */
	union MPI2_SGE_IO_UNION SGL; /* 0x80 */
};

/*
 * MPT RAID MFA IO Descriptor.
 */
struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR {
	u32 RequestFlags:8;
	u32 MessageAddress1:24;
	u32 MessageAddress2;
};
/* Default Request Descriptor */
struct MPI2_DEFAULT_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 DescriptorTypeDependent; /* 0x06 */
};

/* High Priority Request Descriptor */
struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 Reserved1; /* 0x06 */
};

/* SCSI IO Request Descriptor */
struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 DevHandle; /* 0x06 */
};

/* SCSI Target Request Descriptor */
struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 IoIndex; /* 0x06 */
};

/* RAID Accelerator Request Descriptor */
struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
	u8 RequestFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 LMID; /* 0x04 */
	__le16 Reserved; /* 0x06 */
};

/* union of Request Descriptors */
union MEGASAS_REQUEST_DESCRIPTOR_UNION {
	struct MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
	struct MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
	struct MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
	struct MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
	struct MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
	struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR MFAIo;
	union {
		struct {
			__le32 low;
			__le32 high;
		} u;
		__le64 Words;
	};
};

/* Default Reply Descriptor */
struct MPI2_DEFAULT_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 DescriptorTypeDependent1; /* 0x02 */
	__le32 DescriptorTypeDependent2; /* 0x04 */
};

/* Address Reply Descriptor */
struct MPI2_ADDRESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le32 ReplyFrameAddress; /* 0x04 */
};

/* SCSI IO Success Reply Descriptor */
struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le16 TaskTag; /* 0x04 */
	__le16 Reserved1; /* 0x06 */
};

/* TargetAssist Success Reply Descriptor */
struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	u8 SequenceNumber; /* 0x04 */
	u8 Reserved1; /* 0x05 */
	__le16 IoIndex; /* 0x06 */
};

/* Target Command Buffer Reply Descriptor */
struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	u8 VP_ID; /* 0x02 */
	u8 Flags; /* 0x03 */
	__le16 InitiatorDevHandle; /* 0x04 */
	__le16 IoIndex; /* 0x06 */
};

/* RAID Accelerator Success Reply Descriptor */
struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
	u8 ReplyFlags; /* 0x00 */
	u8 MSIxIndex; /* 0x01 */
	__le16 SMID; /* 0x02 */
	__le32 Reserved; /* 0x04 */
};

/* union of Reply Descriptors */
union MPI2_REPLY_DESCRIPTORS_UNION {
	struct MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
	struct MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
	struct MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
	struct MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
	struct MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR
		RAIDAcceleratorSuccess;
	__le64 Words;
};
/* IOCInit Request message */
struct MPI2_IOC_INIT_REQUEST {
	u8 WhoInit; /* 0x00 */
	u8 Reserved1; /* 0x01 */
	u8 ChainOffset; /* 0x02 */
	u8 Function; /* 0x03 */
	__le16 Reserved2; /* 0x04 */
	u8 Reserved3; /* 0x06 */
	u8 MsgFlags; /* 0x07 */
	u8 VP_ID; /* 0x08 */
	u8 VF_ID; /* 0x09 */
	__le16 Reserved4; /* 0x0A */
	__le16 MsgVersion; /* 0x0C */
	__le16 HeaderVersion; /* 0x0E */
	u32 Reserved5; /* 0x10 */
	__le16 Reserved6; /* 0x14 */
	u8 HostPageSize; /* 0x16 */
	u8 HostMSIxVectors; /* 0x17 */
	__le16 Reserved8; /* 0x18 */
	__le16 SystemRequestFrameSize; /* 0x1A */
	__le16 ReplyDescriptorPostQueueDepth; /* 0x1C */
	__le16 ReplyFreeQueueDepth; /* 0x1E */
	__le32 SenseBufferAddressHigh; /* 0x20 */
	__le32 SystemReplyAddressHigh; /* 0x24 */
	__le64 SystemRequestFrameBaseAddress; /* 0x28 */
	__le64 ReplyDescriptorPostQueueAddress;/* 0x30 */
	__le64 ReplyFreeQueueAddress; /* 0x38 */
	__le64 TimeStamp; /* 0x40 */
};

/* mrpriv defines */
#define MR_PD_INVALID 0xFFFF
#define MR_DEVHANDLE_INVALID 0xFFFF
#define MAX_SPAN_DEPTH 8
#define MAX_QUAD_DEPTH MAX_SPAN_DEPTH
#define MAX_RAIDMAP_SPAN_DEPTH (MAX_SPAN_DEPTH)
#define MAX_ROW_SIZE 32
#define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
#define MAX_LOGICAL_DRIVES 64
#define MAX_LOGICAL_DRIVES_EXT 256
#define MAX_LOGICAL_DRIVES_DYN 512
#define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
#define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
#define MAX_ARRAYS 128
#define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
#define MAX_ARRAYS_EXT 256
#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
#define MAX_API_ARRAYS_DYN 512
#define MAX_PHYSICAL_DEVICES 256
#define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
#define MAX_RAIDMAP_PHYSICAL_DEVICES_DYN 512
#define MR_DCMD_LD_MAP_GET_INFO 0x0300e101
#define MR_DCMD_SYSTEM_PD_MAP_GET_INFO 0x0200e102
#define MR_DCMD_DRV_GET_TARGET_PROP 0x0200e103
#define MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC 0x010e8485 /* SR-IOV HB alloc*/
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111 0x03200200
#define MR_DCMD_LD_VF_MAP_GET_ALL_LDS 0x03150200
#define MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES 0x01200100
#define MR_DCMD_CTRL_DEVICE_LIST_GET 0x01190600
struct MR_DEV_HANDLE_INFO {
	__le16 curDevHdl;
	u8 validHandles;
	u8 interfaceType;
	__le16 devHandle[2];
};

struct MR_ARRAY_INFO {
	__le16 pd[MAX_RAIDMAP_ROW_SIZE];
};

struct MR_QUAD_ELEMENT {
	__le64 logStart;
	__le64 logEnd;
	__le64 offsetInSpan;
	__le32 diff;
	__le32 reserved1;
};

struct MR_SPAN_INFO {
	__le32 noElements;
	__le32 reserved1;
	struct MR_QUAD_ELEMENT quad[MAX_RAIDMAP_SPAN_DEPTH];
};

struct MR_LD_SPAN {
	__le64 startBlk;
	__le64 numBlks;
	__le16 arrayRef;
	u8 spanRowSize;
	u8 spanRowDataSize;
	u8 reserved[4];
};

struct MR_SPAN_BLOCK_INFO {
	__le64 num_rows;
	struct MR_LD_SPAN span;
	struct MR_SPAN_INFO block_span_info;
};

#define MR_RAID_CTX_CPUSEL_0 0
#define MR_RAID_CTX_CPUSEL_1 1
#define MR_RAID_CTX_CPUSEL_2 2
#define MR_RAID_CTX_CPUSEL_3 3
#define MR_RAID_CTX_CPUSEL_FCFS 0xF

struct MR_CPU_AFFINITY_MASK {
	union {
		struct {
#ifndef MFI_BIG_ENDIAN
			u8 hw_path:1;
			u8 cpu0:1;
			u8 cpu1:1;
			u8 cpu2:1;
			u8 cpu3:1;
			u8 reserved:3;
#else
			u8 reserved:3;
			u8 cpu3:1;
			u8 cpu2:1;
			u8 cpu1:1;
			u8 cpu0:1;
			u8 hw_path:1;
#endif
		};
		u8 core_mask;
	};
};

struct MR_IO_AFFINITY {
	union {
		struct {
			struct MR_CPU_AFFINITY_MASK pdRead;
			struct MR_CPU_AFFINITY_MASK pdWrite;
			struct MR_CPU_AFFINITY_MASK ldRead;
			struct MR_CPU_AFFINITY_MASK ldWrite;
		};
		u32 word;
	};
	u8 maxCores; /* Total cores + HW Path in ROC */
	u8 reserved[3];
};
struct MR_LD_RAID {
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved4:2;
		u32 fp_cache_bypass_capable:1;
		u32 fp_rmw_capable:1;
		u32 disable_coalescing:1;
		u32 fpBypassRegionLock:1;
		u32 tmCapable:1;
		u32 fpNonRWCapable:1;
		u32 fpReadAcrossStripe:1;
		u32 fpWriteAcrossStripe:1;
		u32 fpReadCapable:1;
		u32 fpWriteCapable:1;
		u32 encryptionType:8;
		u32 pdPiMode:4;
		u32 ldPiMode:4;
		u32 reserved5:2;
		u32 ra_capable:1;
		u32 fpCapable:1;
#else
		u32 fpCapable:1;
		u32 ra_capable:1;
		u32 reserved5:2;
		u32 ldPiMode:4;
		u32 pdPiMode:4;
		u32 encryptionType:8;
		u32 fpWriteCapable:1;
		u32 fpReadCapable:1;
		u32 fpWriteAcrossStripe:1;
		u32 fpReadAcrossStripe:1;
		u32 fpNonRWCapable:1;
		u32 tmCapable:1;
		u32 fpBypassRegionLock:1;
		u32 disable_coalescing:1;
		u32 fp_rmw_capable:1;
		u32 fp_cache_bypass_capable:1;
		u32 reserved4:2;
#endif
	} capability;
	__le32 reserved6;
	__le64 size;
	u8 spanDepth;
	u8 level;
	u8 stripeShift;
	u8 rowSize;
	u8 rowDataSize;
	u8 writeMode;
	u8 PRL;
	u8 SRL;
	__le16 targetId;
	u8 ldState;
	u8 regTypeReqOnWrite;
	u8 modFactor;
	u8 regTypeReqOnRead;
	__le16 seqNum;

	struct {
		u32 ldSyncRequired:1;
		u32 reserved:31;
	} flags;

	u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */
	u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/
	/* 0x2D This LD accepts priority boost of this type */
	u8 ld_accept_priority_type;
	u8 reserved2[2]; /* 0x2E - 0x2F */
	/* 0x30 - 0x33, Logical block size for the LD */
	u32 logical_block_length;
	struct {
#ifndef MFI_BIG_ENDIAN
		/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
		u32 ld_pi_exp:4;
		/* 0x34, LOGICAL BLOCKS PER PHYSICAL
		 * BLOCK EXPONENT from READ CAPACITY 16
		 */
		u32 ld_logical_block_exp:4;
		u32 reserved1:24; /* 0x34 */
#else
		u32 reserved1:24; /* 0x34 */
		/* 0x34, LOGICAL BLOCKS PER PHYSICAL
		 * BLOCK EXPONENT from READ CAPACITY 16
		 */
		u32 ld_logical_block_exp:4;
		/* 0x34, P_I_EXPONENT from READ CAPACITY 16 */
		u32 ld_pi_exp:4;
#endif
	}; /* 0x34 - 0x37 */
	/* 0x38 - 0x3f, This will determine which
	 * core will process LD IO and PD IO.
	 */
	struct MR_IO_AFFINITY cpuAffinity;
	/* Bit definitions are specified by MR_IO_AFFINITY */
	u8 reserved3[0x80 - 0x40]; /* 0x40 - 0x7f */
};

struct MR_LD_SPAN_MAP {
	struct MR_LD_RAID ldRaid;
	u8 dataArmMap[MAX_RAIDMAP_ROW_SIZE];
	struct MR_SPAN_BLOCK_INFO spanBlock[MAX_RAIDMAP_SPAN_DEPTH];
};

struct MR_FW_RAID_MAP {
	__le32 totalSize;
	union {
		struct {
			__le32 maxLd;
			__le32 maxSpanDepth;
			__le32 maxRowSize;
			__le32 maxPdCount;
			__le32 maxArrays;
		} validationInfo;
		__le32 version[5];
	};

	__le32 ldCount;
	__le32 Reserved1;
	u8 ldTgtIdToLd[MAX_RAIDMAP_LOGICAL_DRIVES +
			MAX_RAIDMAP_VIEWS];
	u8 fpPdIoTimeoutSec;
	u8 reserved2[7];
	struct MR_ARRAY_INFO arMapInfo[MAX_RAIDMAP_ARRAYS];
	struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
	struct MR_LD_SPAN_MAP ldSpanMap[1];
};
struct IO_REQUEST_INFO {
	u64 ldStartBlock;
	u32 numBlocks;
	u16 ldTgtId;
	u8 isRead;
	__le16 devHandle;
	u8 pd_interface;
	u64 pdBlock;
	u8 fpOkForIo;
	u8 IoforUnevenSpan;
	u8 start_span;
	u8 do_fp_rlbypass;
	u64 start_row;
	u8 span_arm; /* span[7:5], arm[4:0] */
	u8 pd_after_lb;
	u16 r1_alt_dev_handle; /* raid 1/10 only */
	bool ra_capable;
	u8 data_arms;
};

struct MR_LD_TARGET_SYNC {
	u8 targetId;
	u8 reserved;
	__le16 seqNum;
};
/*
 * RAID Map descriptor Types.
 * Each element should uniquely identify one data structure in the RAID map
 */
enum MR_RAID_MAP_DESC_TYPE {
	/* MR_DEV_HANDLE_INFO data */
	RAID_MAP_DESC_TYPE_DEVHDL_INFO = 0x0,
	/* target to Ld num Index map */
	RAID_MAP_DESC_TYPE_TGTID_INFO = 0x1,
	/* MR_ARRAY_INFO data */
	RAID_MAP_DESC_TYPE_ARRAY_INFO = 0x2,
	/* MR_LD_SPAN_MAP data */
	RAID_MAP_DESC_TYPE_SPAN_INFO = 0x3,
	RAID_MAP_DESC_TYPE_COUNT,
};

/*
 * This table defines the offset, size and num elements of each descriptor
 * type in the RAID Map buffer
 */
struct MR_RAID_MAP_DESC_TABLE {
	/* Raid map descriptor type */
	u32 raid_map_desc_type;
	/* Offset into the RAID map buffer where
	 * descriptor data is saved
	 */
	u32 raid_map_desc_offset;
	/* total size of the
	 * descriptor buffer
	 */
	u32 raid_map_desc_buffer_size;
	/* Number of elements contained in the
	 * descriptor buffer
	 */
	u32 raid_map_desc_elements;
};
/*
 * Dynamic Raid Map Structure.
 */
struct MR_FW_RAID_MAP_DYNAMIC {
	u32 raid_map_size; /* total size of RAID Map structure */
	u32 desc_table_offset;/* Offset of desc table into RAID map*/
	u32 desc_table_size; /* Total Size of desc table */
	/* Total Number of elements in the desc table */
	u32 desc_table_num_elements;
	u64 reserved1;
	u32 reserved2[3]; /* future use */
	/* timeout value used by driver in FP IOs */
	u8 fp_pd_io_timeout_sec;
	u8 reserved3[3];
	/* when this seqNum increments, driver needs to
	 * release RMW buffers asap
	 */
	u32 rmw_fp_seq_num;
	u16 ld_count; /* count of lds. */
	u16 ar_count; /* count of arrays */
	u16 span_count; /* count of spans */
	u16 reserved4[3];
/*
 * The below structure of pointers is only to be used by the driver.
 * This is added in the API to reduce the amount of code changes
 * needed in the driver to support the dynamic RAID map. Firmware should
 * not update these pointers while preparing the raid map.
 */
	union {
		struct {
			struct MR_DEV_HANDLE_INFO *dev_hndl_info;
			u16 *ld_tgt_id_to_ld;
			struct MR_ARRAY_INFO *ar_map_info;
			struct MR_LD_SPAN_MAP *ld_span_map;
		};
		u64 ptr_structure_size[RAID_MAP_DESC_TYPE_COUNT];
	};
/*
 * RAID Map descriptor table defines the layout of data in the RAID Map.
 * The size of the descriptor table itself could change.
 */
	/* Variable Size descriptor Table. */
	struct MR_RAID_MAP_DESC_TABLE
		raid_map_desc_table[RAID_MAP_DESC_TYPE_COUNT];
	/* Variable Size buffer containing all data */
	u32 raid_map_desc_data[1];
}; /* Dynamically sized RAID Map structure */
#define IEEE_SGE_FLAGS_ADDR_MASK (0x03)
#define IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
#define IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
#define IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
#define IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
#define IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
#define IEEE_SGE_FLAGS_END_OF_LIST (0x40)

#define MPI2_SGE_FLAGS_SHIFT (0x02)
#define IEEE_SGE_FLAGS_FORMAT_MASK (0xC0)
#define IEEE_SGE_FLAGS_FORMAT_IEEE (0x00)
#define IEEE_SGE_FLAGS_FORMAT_NVME (0x02)

#define MPI26_IEEE_SGE_FLAGS_NSF_MASK (0x1C)
#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE (0x00)
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP (0x08)
#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL (0x10)
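/*
 * Illustrative example (not part of the original header): a chain SGE
 * pointing at IOC PLB NTA space could combine the flags above as
 *
 *	sgl_flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
 *
 * while the final simple element of a list would typically also carry
 * IEEE_SGE_FLAGS_END_OF_LIST.
 */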
#define MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME 15
#define MEGASAS_MAX_SNAP_DUMP_WAIT_TIME 60

struct megasas_register_set;
struct megasas_instance;

union desc_word {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};

struct megasas_cmd_fusion {
	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
	dma_addr_t io_request_phys_addr;

	union MPI2_SGE_IO_UNION *sg_frame;
	dma_addr_t sg_frame_phys_addr;

	u8 *sense;
	dma_addr_t sense_phys_addr;

	struct list_head list;
	struct scsi_cmnd *scmd;
	struct megasas_instance *instance;

	u8 retry_for_fw_reset;
	union MEGASAS_REQUEST_DESCRIPTOR_UNION *request_desc;

	/*
	 * Context for a MFI frame.
	 * Used to get the mfi cmd from the list when a MFI cmd is completed
	 */
	u32 sync_cmd_idx;
	u32 index;
	u8 pd_r1_lb;
	struct completion done;
	u8 pd_interface;
	u16 r1_alt_dev_handle; /* raid 1/10 only*/
	bool cmd_completed; /* raid 1/10 fp writes status holder */
};
struct LD_LOAD_BALANCE_INFO {
	u8 loadBalanceFlag;
	u8 reserved1;
	atomic_t scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
	u64 last_accessed_block[MAX_PHYSICAL_DEVICES];
};

/* SPAN_SET is info calculated from span info from Raid map per LD */
typedef struct _LD_SPAN_SET {
	u64 log_start_lba;
	u64 log_end_lba;
	u64 span_row_start;
	u64 span_row_end;
	u64 data_strip_start;
	u64 data_strip_end;
	u64 data_row_start;
	u64 data_row_end;
	u8 strip_offset[MAX_SPAN_DEPTH];
	u32 span_row_data_width;
	u32 diff;
	u32 reserved[2];
} LD_SPAN_SET, *PLD_SPAN_SET;

typedef struct LOG_BLOCK_SPAN_INFO {
	LD_SPAN_SET span_set[MAX_SPAN_DEPTH];
} LD_SPAN_INFO, *PLD_SPAN_INFO;

struct MR_FW_RAID_MAP_ALL {
	struct MR_FW_RAID_MAP raidMap;
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
} __attribute__ ((packed));
struct MR_DRV_RAID_MAP {
	/* total size of this structure, including this field.
	 * This field will be manipulated by the driver for the ext raid map,
	 * else pick the value from the firmware raid map.
	 */
	__le32 totalSize;

	union {
		struct {
			__le32 maxLd;
			__le32 maxSpanDepth;
			__le32 maxRowSize;
			__le32 maxPdCount;
			__le32 maxArrays;
		} validationInfo;
		__le32 version[5];
	};

	/* timeout value used by driver in FP IOs*/
	u8 fpPdIoTimeoutSec;
	u8 reserved2[7];

	__le16 ldCount;
	__le16 arCount;
	__le16 spanCount;
	__le16 reserve3;

	struct MR_DEV_HANDLE_INFO
		devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES_DYN];
	u16 ldTgtIdToLd[MAX_LOGICAL_DRIVES_DYN];
	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_DYN];
	struct MR_LD_SPAN_MAP ldSpanMap[1];
};

/* Driver raid map size is same as the raid map ext.
 * MR_DRV_RAID_MAP_ALL is created to sync with the old raid map
 * and is mainly for code re-use.
 */
struct MR_DRV_RAID_MAP_ALL {

	struct MR_DRV_RAID_MAP raidMap;
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_DYN - 1];
} __packed;
struct MR_FW_RAID_MAP_EXT {
	/* Not used in the new map */
	u32 reserved;

	union {
		struct {
			u32 maxLd;
			u32 maxSpanDepth;
			u32 maxRowSize;
			u32 maxPdCount;
			u32 maxArrays;
		} validationInfo;
		u32 version[5];
	};

	u8 fpPdIoTimeoutSec;
	u8 reserved2[7];

	__le16 ldCount;
	__le16 arCount;
	__le16 spanCount;
	__le16 reserve3;

	struct MR_DEV_HANDLE_INFO devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
	u8 ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
	struct MR_ARRAY_INFO arMapInfo[MAX_API_ARRAYS_EXT];
	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
};
/*
 * define MR_PD_CFG_SEQ structure for system PDs
 */
struct MR_PD_CFG_SEQ {
	u16 seqNum;
	u16 devHandle;
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u8 reserved:7;
		u8 tmCapable:1;
#else
		u8 tmCapable:1;
		u8 reserved:7;
#endif
	} capability;
	u8 reserved;
	u16 pd_target_id;
} __packed;

struct MR_PD_CFG_SEQ_NUM_SYNC {
	__le32 size;
	__le32 count;
	struct MR_PD_CFG_SEQ seq[1];
} __packed;

/* stream detection */
struct STREAM_DETECT {
	u64 next_seq_lba; /* next LBA to match sequential access */
	struct megasas_cmd_fusion *first_cmd_fusion; /* first cmd in group */
	struct megasas_cmd_fusion *last_cmd_fusion; /* last cmd in group */
	u32 count_cmds_in_stream; /* count of host commands in this stream */
	u16 num_sges_in_group; /* total number of SGEs in grouped IOs */
	u8 is_read; /* SCSI OpCode for this stream */
	u8 group_depth; /* total number of host commands in group */
	/* TRUE if cannot add any more commands to this group */
	bool group_flush;
	u8 reserved[7]; /* pad to 64-bit alignment */
};
struct LD_STREAM_DETECT {
	bool write_back; /* TRUE if WB, FALSE if WT */
	bool fp_write_enabled;
	bool members_ssds;
	bool fp_cache_bypass_capable;
	u32 mru_bit_map; /* bitmap used to track MRU and LRU stream indices */
	/* this is the array of stream detect structures (one per stream) */
	struct STREAM_DETECT stream_track[MAX_STREAMS_TRACKED];
};
struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
	u64 RDPQBaseAddress;
	u32 Reserved1;
	u32 Reserved2;
};

struct rdpq_alloc_detail {
	struct dma_pool *dma_pool_ptr;
	dma_addr_t pool_entry_phys;
	union MPI2_REPLY_DESCRIPTORS_UNION *pool_entry_virt;
};

struct fusion_context {
	struct megasas_cmd_fusion **cmd_list;
	dma_addr_t req_frames_desc_phys;
	u8 *req_frames_desc;

	struct dma_pool *io_request_frames_pool;
	dma_addr_t io_request_frames_phys;
	u8 *io_request_frames;

	struct dma_pool *sg_dma_pool;
	struct dma_pool *sense_dma_pool;

	u8 *sense;
	dma_addr_t sense_phys_addr;

	dma_addr_t reply_frames_desc_phys[MAX_MSIX_QUEUES_FUSION];
	union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc[MAX_MSIX_QUEUES_FUSION];
	struct rdpq_alloc_detail rdpq_tracker[RDPQ_MAX_CHUNK_COUNT];
	struct dma_pool *reply_frames_desc_pool;
	struct dma_pool *reply_frames_desc_pool_align;

	u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION];

	u32 reply_q_depth;
	u32 request_alloc_sz;
	u32 reply_alloc_sz;
	u32 io_frames_alloc_sz;

	struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY *rdpq_virt;
	dma_addr_t rdpq_phys;
	u16 max_sge_in_main_msg;
	u16 max_sge_in_chain;

	u8 chain_offset_io_request;
	u8 chain_offset_mfi_pthru;

	struct MR_FW_RAID_MAP_DYNAMIC *ld_map[2];
	dma_addr_t ld_map_phys[2];

	/* Non dma-able memory. Driver local copy. */
	struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];

	u32 max_map_sz;
	u32 current_map_sz;
	u32 old_map_sz;
	u32 new_map_sz;
	u32 drv_map_sz;
	u32 drv_map_pages;
	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_seq_sync[JBOD_MAPS_COUNT];
	dma_addr_t pd_seq_phys[JBOD_MAPS_COUNT];
	u8 fast_path_io;
	struct LD_LOAD_BALANCE_INFO *load_balance_info;
	u32 load_balance_info_pages;
	LD_SPAN_INFO *log_to_span;
	u32 log_to_span_pages;
	struct LD_STREAM_DETECT **stream_detect_by_ld;
	dma_addr_t ioc_init_request_phys;
	struct MPI2_IOC_INIT_REQUEST *ioc_init_request;
	struct megasas_cmd *ioc_init_cmd;
	bool pcie_bw_limitation;
	bool r56_div_offload;
};

union desc_value {
	__le64 word;
	struct {
		__le32 low;
		__le32 high;
	} u;
};

enum CMD_RET_VALUES {
	REFIRE_CMD = 1,
	COMPLETE_CMD = 2,
	RETURN_CMD = 3,
};

struct MR_SNAPDUMP_PROPERTIES {
	u8 offload_num;
	u8 max_num_supported;
	u8 cur_num_supported;
	u8 trigger_min_num_sec_before_ocr;
	u8 reserved[12];
};

struct megasas_debugfs_buffer {
	void *buf;
	u32 len;
};

void megasas_free_cmds_fusion(struct megasas_instance *instance);
int megasas_ioc_init_fusion(struct megasas_instance *instance);
u8 megasas_get_map_info(struct megasas_instance *instance);
int megasas_sync_map_info(struct megasas_instance *instance);
void megasas_release_fusion(struct megasas_instance *instance);
void megasas_reset_reply_desc(struct megasas_instance *instance);
int megasas_check_mpio_paths(struct megasas_instance *instance,
			     struct scsi_cmnd *scmd);
void megasas_fusion_ocr_wq(struct work_struct *work);

#endif /* _MEGARAID_SAS_FUSION_H_ */