/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>	/* To get host page size per arch */

#include "mpt3sas_base.h"
static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0444);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0444);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0444);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0444);
MODULE_PARM_DESC(max_msix_vectors,
	" max msix vectors");

static int irqpoll_weight = -1;
module_param(irqpoll_weight, int, 0444);
MODULE_PARM_DESC(irqpoll_weight,
	"irq poll weight (default= one fourth of HBA queue depth)");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");

static int perf_mode = -1;
module_param(perf_mode, int, 0444);
MODULE_PARM_DESC(perf_mode,
	"Performance mode (only for Aero/Sea Generation), options:\n\t\t"
	"0 - balanced: high iops mode is enabled &\n\t\t"
	"interrupt coalescing is enabled only on high iops queues,\n\t\t"
	"1 - iops: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues,\n\t\t"
	"2 - latency: high iops mode is disabled &\n\t\t"
	"interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
	"\t\tdefault - default perf_mode is 'balanced'"
	);

static int poll_queues;
module_param(poll_queues, int, 0444);
MODULE_PARM_DESC(poll_queues, "Number of queues to be used for io_uring poll mode.\n\t\t"
	"This parameter is effective only if host_tagset_enable=1. &\n\t\t"
	"when poll_queues are enabled then &\n\t\t"
	"perf_mode is set to latency mode. &\n\t\t"
	);
enum mpt3sas_perf_mode {
	MPT_PERF_MODE_DEFAULT	= -1,
	MPT_PERF_MODE_BALANCED	= 0,
	MPT_PERF_MODE_IOPS	= 1,
	MPT_PERF_MODE_LATENCY	= 2,
};
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
	u32 ioc_state, int timeout);
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
static u32
_base_readl_ext_retry(const void __iomem *addr);
/**
 * mpt3sas_base_check_cmd_timeout - Function
 *		to check timeout and command termination due
 *		to Host reset.
 *
 * @ioc:	per adapter object.
 * @status:	Status of issued command.
 * @mpi_request:mf request pointer.
 * @sz:		size of buffer.
 *
 * Return: 1/0 Reset to be done or Not
 */
u8
mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
	u8 status, void *mpi_request, int sz)
{
	u8 issue_reset = 0;

	if (!(status & MPT3_CMD_RESET))
		issue_reset = 1;

	ioc_err(ioc, "Command %s\n",
	    issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
	_debug_dump_mf(mpi_request, sz);

	return issue_reset;
}
/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	/* global ioc spinlock to protect controller list on list operations */
	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->fwfault_debug = mpt3sas_fwfault_debug;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
	param_get_int, &mpt3sas_fwfault_debug, 0644);
/**
 * _base_readl_aero - retry readl for max three times.
 * @addr: MPT Fusion system interface register address
 *
 * Retry the readl() for max three times if it gets zero value
 * while reading the system interface register.
 */
static inline u32
_base_readl_aero(const void __iomem *addr)
{
	u32 i = 0, ret_val;

	do {
		ret_val = readl(addr);
		i++;
	} while (ret_val == 0 && i < 3);

	return ret_val;
}

static u32
_base_readl_ext_retry(const void __iomem *addr)
{
	u32 i, ret_val;

	for (i = 0 ; i < 30 ; i++) {
		ret_val = readl(addr);
		if (ret_val != 0)
			break;
	}

	return ret_val;
}

static inline u32
_base_readl(const void __iomem *addr)
{
	return readl(addr);
}
/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *				  in BAR0 space.
 *
 * @ioc: per adapter object
 * @reply: reply message frame(lower 32bit addr)
 * @index: System request message index.
 */
static void
_base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
	u32 index)
{
	/*
	 * 256 is offset within sys register.
	 * 256 offset MPI frame starts. Max MPI frame supported is 32.
	 * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
	 */
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
			MPI_FRAME_START_OFFSET +
			(cmd_credit * ioc->request_sz) + (index * sizeof(u32));

	writel(reply, reply_free_iomem);
}
/**
 * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
 *				to system/BAR0 region.
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)src;

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
		    (void __iomem *)dst_iomem + (i * 4));
}

/**
 * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
 *
 * @dst_iomem: Pointer to the destination location in BAR0 space.
 * @src: Pointer to the Source data.
 * @size: Size of data to be copied.
 */
static void
_base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
{
	int i;
	u32 *src_virt_mem = (u32 *)(src);

	for (i = 0; i < size/4; i++)
		writel((u32)src_virt_mem[i],
		    (void __iomem *)dst_iomem + (i * 4));
}
/**
 * _base_get_chain - Calculates and Returns virtual chain address
 *			 for the provided smid in BAR0 space.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: the chain address.
 */
static inline void __iomem*
_base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
		u8 sge_chain_count)
{
	void __iomem *base_chain, *chain_virt;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain  = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_virt;
}
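
/*
 * Worked example (illustrative, not from the original source): with
 * RequestCredit = 31 (so cmd_credit = 32), request_sz = 128 and
 * MaxChainDepth = 3, base_chain sits at BAR0 + 256 + 32 * 128 +
 * REPLY_FREE_POOL_SIZE, and the chain for smid = 2, sge_chain_count = 1
 * lies a further (2 * 3 + 1) * 128 bytes beyond that.
 */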
/**
 * _base_get_chain_phys - Calculates and Returns physical address
 *			in BAR0 for scatter gather chains, for
 *			the provided smid.
 *
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: Scatter gather chain count.
 *
 * Return: Physical chain address.
 */
static inline phys_addr_t
_base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
		u8 sge_chain_count)
{
	phys_addr_t base_chain_phys, chain_phys;
	u16 cmd_credit = ioc->facts.RequestCredit + 1;

	base_chain_phys  = ioc->chip_phys + MPI_FRAME_START_OFFSET +
		(cmd_credit * ioc->request_sz) +
		REPLY_FREE_POOL_SIZE;
	chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
			ioc->request_sz) + (sge_chain_count * ioc->request_sz);
	return chain_phys;
}
/**
 * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
 *			buffer address for the provided smid.
 *			(Each smid can have 64K starts from 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
static void __iomem *
_base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	// Added extra 1 to reach end of chain.
	void __iomem *chain_end = _base_get_chain(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end + (smid * 64 * 1024);
}

/**
 * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
 *			Host buffer Physical address for the provided smid.
 *			(Each smid can have 64K starts from 17024)
 *
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: Pointer to buffer location in BAR0.
 */
static phys_addr_t
_base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	u16 cmd_credit = ioc->facts.RequestCredit + 1;
	phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
			cmd_credit + 1,
			ioc->facts.MaxChainDepth);
	return chain_end_phys + (smid * 64 * 1024);
}
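
/*
 * Worked example (illustrative, not from the original source): the kernel-doc
 * above says each smid owns a 64K window starting at the quoted BAR0 offset
 * of 17024, so the host buffer for smid = 3 would begin
 * 17024 + 3 * 65536 = 213632 bytes into BAR0.
 */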
/**
 * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
 *			lookup list and Provides chain_buffer
 *			address for the matching dma address.
 *			(Each smid can have 64K starts from 17024)
 *
 * @ioc: per adapter object
 * @chain_buffer_dma: Chain buffer dma address.
 *
 * Return: Pointer to chain buffer. Or Null on Failure.
 */
static void *
_base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
		dma_addr_t chain_buffer_dma)
{
	u16 index, j;
	struct chain_tracker *ct;

	for (index = 0; index < ioc->scsiio_depth; index++) {
		for (j = 0; j < ioc->chains_needed_per_io; j++) {
			ct = &ioc->chain_lookup[index].chains_per_smid[j];
			if (ct && ct->chain_buffer_dma == chain_buffer_dma)
				return ct->chain_buffer;
		}
	}
	ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
	return NULL;
}
/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *			are handled here. Base function for
 *			double buffering, before submitting
 *			the requests.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
		void *mpi_request, u16 smid)
{
	Mpi2SGESimple32_t *sgel, *sgel_next;
	u32  sgl_flags, sge_chain_count = 0;
	bool is_write = false;
	u16 i = 0;
	void __iomem *buffer_iomem;
	phys_addr_t buffer_iomem_phys;
	void __iomem *buff_ptr;
	phys_addr_t buff_ptr_phys;
	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	phys_addr_t dst_addr_phys;
	MPI2RequestHeader_t *request_hdr;
	struct scsi_cmnd *scmd;
	struct scatterlist *sg_scmd = NULL;
	int is_scsiio_req = 0;

	request_hdr = (MPI2RequestHeader_t *) mpi_request;

	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
		Mpi25SCSIIORequest_t *scsiio_request =
			(Mpi25SCSIIORequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
		is_scsiio_req = 1;
	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t  *config_req =
			(Mpi2ConfigRequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
	} else
		return;

	/* From smid we can get scsi_cmd, once we have sg_scmd,
	 * we just need to get sg_virt and sg_next to get virtual
	 * address associated with sgel->Address.
	 */

	if (is_scsiio_req) {
		/* Get scsi_cmd using smid */
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (scmd == NULL) {
			ioc_err(ioc, "scmd is NULL\n");
			return;
		}

		/* Get sg_scmd from scmd provided */
		sg_scmd = scsi_sglist(scmd);
	}

	/*
	 * 0 - 255	System register
	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
	 * 4352 - 4864	Reply_free pool (512 byte is reserved
	 *		considering maxCredit 32. Reply need extra
	 *		room, for mCPU case kept four times of
	 *		maxCredit).
	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
	 *		128 byte size = 12288)
	 * 17152 - x	Host buffer mapped with smid.
	 *		(Each smid can have 64K Max IO.)
	 * BAR0+Last 1K MSIX Addr and Data
	 * Total size in use 2113664 bytes of 4MB BAR0
	 */
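	/*
	 * Worked example of the map above (illustrative, not from the original
	 * source): with maxCredit 32 and 128-byte MPI frames, the frame pool
	 * is 32 * 128 = 4096 bytes starting at offset 256 (256..4352), the
	 * reply free pool adds 512 bytes (4352..4864), and the chain region
	 * adds 32 * 3 * 128 = 12288 bytes (4864..17152); host buffers follow
	 * from offset 17152 onwards.
	 */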
	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

	buff_ptr = buffer_iomem;
	buff_ptr_phys = buffer_iomem_phys;
	WARN_ON(buff_ptr_phys > U32_MAX);

	if (le32_to_cpu(sgel->FlagsLength) &
			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
		is_write = true;

	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

		sgl_flags =
		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
			/*
			 * Helper function which on passing
			 * chain_buffer_dma returns chain_buffer. Get
			 * the virtual address for sgel->Address
			 */
			sgel_next =
				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
						le32_to_cpu(sgel->Address));
			if (sgel_next == NULL)
				return;
			/*
			 * This is copying 128 byte chain
			 * frame (not a host buffer)
			 */
			dst_chain_addr[sge_chain_count] =
				_base_get_chain(ioc,
					smid, sge_chain_count);
			src_chain_addr[sge_chain_count] =
						(void *) sgel_next;
			dst_addr_phys = _base_get_chain_phys(ioc,
						smid, sge_chain_count);
			WARN_ON(dst_addr_phys > U32_MAX);
			sgel->Address =
				cpu_to_le32(lower_32_bits(dst_addr_phys));
			sgel = sgel_next;
			sge_chain_count++;
			break;
		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
			if (is_write) {
				if (is_scsiio_req) {
					_base_clone_to_sys_mem(buff_ptr,
					    sg_virt(sg_scmd),
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					/*
					 * FIXME: this relies on a zero
					 * PCI mem_offset.
					 */
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				} else {
					_base_clone_to_sys_mem(buff_ptr,
					    ioc->config_vaddr,
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				}
			}
			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			if ((le32_to_cpu(sgel->FlagsLength) &
			    (MPI2_SGE_FLAGS_END_OF_BUFFER
					<< MPI2_SGE_FLAGS_SHIFT)))
				goto eob_clone_chain;
			else {
				/*
				 * Every single element in MPT will have
				 * associated sg_next. Better to sanity that
				 * sg_next is not NULL, but it will be a bug
				 * if it is null.
				 */
				if (is_scsiio_req) {
					sg_scmd = sg_next(sg_scmd);
					if (sg_scmd)
						sgel++;
					else
						goto eob_clone_chain;
				}
			}
			break;
		}
	}

eob_clone_chain:
	for (i = 0; i < sge_chain_count; i++) {
		if (is_scsiio_req)
			_base_clone_to_sys_mem(dst_chain_addr[i],
				src_chain_addr[i], ioc->request_sz);
	}
}
/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (!ioc)
		return -1;

	pdev = ioc->pdev;
	if (!pdev)
		return -1;
	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
}
/**
 * _base_sync_drv_fw_timestamp - Sync Drive-Fw TimeStamp.
 * @ioc: Per Adapter Object
 *
 * Return: nothing.
 */
static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26IoUnitControlRequest_t *mpi_request;
	Mpi26IoUnitControlReply_t *mpi_reply;
	u16 smid;
	ktime_t current_time;
	u64 TimeStamp = 0;
	u8 issue_reset = 0;

	mutex_lock(&ioc->scsih_cmds.mutex);
	if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
		ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
		goto out;
	}
	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
	if (!smid) {
		ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
		ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
		goto out;
	}
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->scsih_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
	mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
	mpi_request->Operation = MPI26_CTRL_OP_SET_IOC_PARAMETER;
	mpi_request->IOCParameter = MPI26_SET_IOC_PARAMETER_SYNC_TIMESTAMP;
	current_time = ktime_get_real();
	TimeStamp = ktime_to_ms(current_time);
	mpi_request->Reserved7 = cpu_to_le32(TimeStamp >> 32);
	mpi_request->IOCParameterValue = cpu_to_le32(TimeStamp & 0xFFFFFFFF);
	init_completion(&ioc->scsih_cmds.done);
	ioc->put_smid_default(ioc, smid);
	dinitprintk(ioc, ioc_info(ioc,
	    "Io Unit Control Sync TimeStamp (sending), @time %lld ms\n",
	    TimeStamp));
	wait_for_completion_timeout(&ioc->scsih_cmds.done,
		MPT3SAS_TIMESYNC_TIMEOUT_SECONDS*HZ);
	if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
		mpt3sas_check_cmd_timeout(ioc,
		    ioc->scsih_cmds.status, mpi_request,
		    sizeof(Mpi2SasIoUnitControlRequest_t)/4, issue_reset);
		goto issue_host_reset;
	}
	if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
		mpi_reply = ioc->scsih_cmds.reply;
		dinitprintk(ioc, ioc_info(ioc,
		    "Io Unit Control sync timestamp (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
		    le16_to_cpu(mpi_reply->IOCStatus),
		    le32_to_cpu(mpi_reply->IOCLogInfo)));
	}
issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
out:
	mutex_unlock(&ioc->scsih_cmds.mutex);
}
689 * _base_fault_reset_work - workq handling ioc fault conditions
690 * @work: input argument, used to derive ioc
695 _base_fault_reset_work(struct work_struct
*work
)
697 struct MPT3SAS_ADAPTER
*ioc
=
698 container_of(work
, struct MPT3SAS_ADAPTER
, fault_reset_work
.work
);
702 struct task_struct
*p
;
705 spin_lock_irqsave(&ioc
->ioc_reset_in_progress_lock
, flags
);
706 if ((ioc
->shost_recovery
&& (ioc
->ioc_coredump_loop
== 0)) ||
707 ioc
->pci_error_recovery
)
709 spin_unlock_irqrestore(&ioc
->ioc_reset_in_progress_lock
, flags
);
711 doorbell
= mpt3sas_base_get_iocstate(ioc
, 0);
712 if ((doorbell
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_MASK
) {
713 ioc_err(ioc
, "SAS host is non-operational !!!!\n");
715 /* It may be possible that EEH recovery can resolve some of
716 * pci bus failure issues rather removing the dead ioc function
717 * by considering controller is in a non-operational state. So
718 * here priority is given to the EEH recovery. If it doesn't
719 * not resolve this issue, mpt3sas driver will consider this
720 * controller to non-operational state and remove the dead ioc
723 if (ioc
->non_operational_loop
++ < 5) {
724 spin_lock_irqsave(&ioc
->ioc_reset_in_progress_lock
,
730 * Call _scsih_flush_pending_cmds callback so that we flush all
731 * pending commands back to OS. This call is required to avoid
732 * deadlock at block layer. Dead IOC will fail to do diag reset,
733 * and this call is safe since dead ioc will never return any
734 * command back from HW.
736 mpt3sas_base_pause_mq_polling(ioc
);
737 ioc
->schedule_dead_ioc_flush_running_cmds(ioc
);
739 * Set remove_host flag early since kernel thread will
740 * take some time to execute.
742 ioc
->remove_host
= 1;
743 /*Remove the Dead Host */
744 p
= kthread_run(mpt3sas_remove_dead_ioc_func
, ioc
,
745 "%s_dead_ioc_%d", ioc
->driver_name
, ioc
->id
);
747 ioc_err(ioc
, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
750 ioc_err(ioc
, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
752 return; /* don't rearm timer */
755 if ((doorbell
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_COREDUMP
) {
756 u8 timeout
= (ioc
->manu_pg11
.CoreDumpTOSec
) ?
757 ioc
->manu_pg11
.CoreDumpTOSec
:
758 MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS
;
760 timeout
/= (FAULT_POLLING_INTERVAL
/1000);
762 if (ioc
->ioc_coredump_loop
== 0) {
763 mpt3sas_print_coredump_info(ioc
,
764 doorbell
& MPI2_DOORBELL_DATA_MASK
);
765 /* do not accept any IOs and disable the interrupts */
767 &ioc
->ioc_reset_in_progress_lock
, flags
);
768 ioc
->shost_recovery
= 1;
769 spin_unlock_irqrestore(
770 &ioc
->ioc_reset_in_progress_lock
, flags
);
771 mpt3sas_base_mask_interrupts(ioc
);
772 mpt3sas_base_pause_mq_polling(ioc
);
773 _base_clear_outstanding_commands(ioc
);
776 ioc_info(ioc
, "%s: CoreDump loop %d.",
777 __func__
, ioc
->ioc_coredump_loop
);
779 /* Wait until CoreDump completes or times out */
780 if (ioc
->ioc_coredump_loop
++ < timeout
) {
782 &ioc
->ioc_reset_in_progress_lock
, flags
);
787 if (ioc
->ioc_coredump_loop
) {
788 if ((doorbell
& MPI2_IOC_STATE_MASK
) != MPI2_IOC_STATE_COREDUMP
)
789 ioc_err(ioc
, "%s: CoreDump completed. LoopCount: %d",
790 __func__
, ioc
->ioc_coredump_loop
);
792 ioc_err(ioc
, "%s: CoreDump Timed out. LoopCount: %d",
793 __func__
, ioc
->ioc_coredump_loop
);
794 ioc
->ioc_coredump_loop
= MPT3SAS_COREDUMP_LOOP_DONE
;
796 ioc
->non_operational_loop
= 0;
797 if ((doorbell
& MPI2_IOC_STATE_MASK
) != MPI2_IOC_STATE_OPERATIONAL
) {
798 rc
= mpt3sas_base_hard_reset_handler(ioc
, FORCE_BIG_HAMMER
);
799 ioc_warn(ioc
, "%s: hard reset: %s\n",
800 __func__
, rc
== 0 ? "success" : "failed");
801 doorbell
= mpt3sas_base_get_iocstate(ioc
, 0);
802 if ((doorbell
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_FAULT
) {
803 mpt3sas_print_fault_code(ioc
, doorbell
&
804 MPI2_DOORBELL_DATA_MASK
);
805 } else if ((doorbell
& MPI2_IOC_STATE_MASK
) ==
806 MPI2_IOC_STATE_COREDUMP
)
807 mpt3sas_print_coredump_info(ioc
, doorbell
&
808 MPI2_DOORBELL_DATA_MASK
);
809 if (rc
&& (doorbell
& MPI2_IOC_STATE_MASK
) !=
810 MPI2_IOC_STATE_OPERATIONAL
)
811 return; /* don't rearm timer */
813 ioc
->ioc_coredump_loop
= 0;
814 if (ioc
->time_sync_interval
&&
815 ++ioc
->timestamp_update_count
>= ioc
->time_sync_interval
) {
816 ioc
->timestamp_update_count
= 0;
817 _base_sync_drv_fw_timestamp(ioc
);
819 spin_lock_irqsave(&ioc
->ioc_reset_in_progress_lock
, flags
);
821 if (ioc
->fault_reset_work_q
)
822 queue_delayed_work(ioc
->fault_reset_work_q
,
823 &ioc
->fault_reset_work
,
824 msecs_to_jiffies(FAULT_POLLING_INTERVAL
));
825 spin_unlock_irqrestore(&ioc
->ioc_reset_in_progress_lock
, flags
);
829 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
830 * @ioc: per adapter object
835 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER
*ioc
)
839 if (ioc
->fault_reset_work_q
)
842 ioc
->timestamp_update_count
= 0;
843 /* initialize fault polling */
845 INIT_DELAYED_WORK(&ioc
->fault_reset_work
, _base_fault_reset_work
);
846 snprintf(ioc
->fault_reset_work_q_name
,
847 sizeof(ioc
->fault_reset_work_q_name
), "poll_%s%d_status",
848 ioc
->driver_name
, ioc
->id
);
849 ioc
->fault_reset_work_q
= alloc_ordered_workqueue(
850 "%s", WQ_MEM_RECLAIM
, ioc
->fault_reset_work_q_name
);
851 if (!ioc
->fault_reset_work_q
) {
852 ioc_err(ioc
, "%s: failed (line=%d)\n", __func__
, __LINE__
);
855 spin_lock_irqsave(&ioc
->ioc_reset_in_progress_lock
, flags
);
856 if (ioc
->fault_reset_work_q
)
857 queue_delayed_work(ioc
->fault_reset_work_q
,
858 &ioc
->fault_reset_work
,
859 msecs_to_jiffies(FAULT_POLLING_INTERVAL
));
860 spin_unlock_irqrestore(&ioc
->ioc_reset_in_progress_lock
, flags
);
864 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
865 * @ioc: per adapter object
870 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER
*ioc
)
873 struct workqueue_struct
*wq
;
875 spin_lock_irqsave(&ioc
->ioc_reset_in_progress_lock
, flags
);
876 wq
= ioc
->fault_reset_work_q
;
877 ioc
->fault_reset_work_q
= NULL
;
878 spin_unlock_irqrestore(&ioc
->ioc_reset_in_progress_lock
, flags
);
880 if (!cancel_delayed_work_sync(&ioc
->fault_reset_work
))
882 destroy_workqueue(wq
);
887 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
888 * @ioc: per adapter object
889 * @fault_code: fault code
892 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER
*ioc
, u16 fault_code
)
894 ioc_err(ioc
, "fault_state(0x%04x)!\n", fault_code
);
898 * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state
899 * @ioc: per adapter object
900 * @fault_code: fault code
905 mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER
*ioc
, u16 fault_code
)
907 ioc_err(ioc
, "coredump_state(0x%04x)!\n", fault_code
);
911 * mpt3sas_base_wait_for_coredump_completion - Wait until coredump
912 * completes or times out
913 * @ioc: per adapter object
914 * @caller: caller function name
916 * Return: 0 for success, non-zero for failure.
919 mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER
*ioc
,
922 u8 timeout
= (ioc
->manu_pg11
.CoreDumpTOSec
) ?
923 ioc
->manu_pg11
.CoreDumpTOSec
:
924 MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS
;
926 int ioc_state
= _base_wait_on_iocstate(ioc
, MPI2_IOC_STATE_FAULT
,
931 "%s: CoreDump timed out. (ioc_state=0x%x)\n",
935 "%s: CoreDump completed. (ioc_state=0x%x)\n",
942 * mpt3sas_halt_firmware - halt's mpt controller firmware
943 * @ioc: per adapter object
945 * For debugging timeout related issues. Writing 0xCOFFEE00
946 * to the doorbell register will halt controller firmware. With
947 * the purpose to stop both driver and firmware, the enduser can
948 * obtain a ring buffer from controller UART.
951 mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER
*ioc
)
955 if (!ioc
->fwfault_debug
)
960 doorbell
= ioc
->base_readl_ext_retry(&ioc
->chip
->Doorbell
);
961 if ((doorbell
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_FAULT
) {
962 mpt3sas_print_fault_code(ioc
, doorbell
&
963 MPI2_DOORBELL_DATA_MASK
);
964 } else if ((doorbell
& MPI2_IOC_STATE_MASK
) ==
965 MPI2_IOC_STATE_COREDUMP
) {
966 mpt3sas_print_coredump_info(ioc
, doorbell
&
967 MPI2_DOORBELL_DATA_MASK
);
969 writel(0xC0FFEE00, &ioc
->chip
->Doorbell
);
970 ioc_err(ioc
, "Firmware is halted due to command timeout\n");
973 if (ioc
->fwfault_debug
== 2)
977 panic("panic in %s\n", __func__
);
981 * _base_sas_ioc_info - verbose translation of the ioc status
982 * @ioc: per adapter object
983 * @mpi_reply: reply mf payload returned from firmware
984 * @request_hdr: request mf
987 _base_sas_ioc_info(struct MPT3SAS_ADAPTER
*ioc
, MPI2DefaultReply_t
*mpi_reply
,
988 MPI2RequestHeader_t
*request_hdr
)
990 u16 ioc_status
= le16_to_cpu(mpi_reply
->IOCStatus
) &
994 char *func_str
= NULL
;
996 /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
997 if (request_hdr
->Function
== MPI2_FUNCTION_SCSI_IO_REQUEST
||
998 request_hdr
->Function
== MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH
||
999 request_hdr
->Function
== MPI2_FUNCTION_EVENT_NOTIFICATION
)
1002 if (ioc_status
== MPI2_IOCSTATUS_CONFIG_INVALID_PAGE
)
1005 * Older Firmware version doesn't support driver trigger pages.
1006 * So, skip displaying 'config invalid type' type
1009 if (request_hdr
->Function
== MPI2_FUNCTION_CONFIG
) {
1010 Mpi2ConfigRequest_t
*rqst
= (Mpi2ConfigRequest_t
*)request_hdr
;
1012 if ((rqst
->ExtPageType
==
1013 MPI2_CONFIG_EXTPAGETYPE_DRIVER_PERSISTENT_TRIGGER
) &&
1014 !(ioc
->logging_level
& MPT_DEBUG_CONFIG
)) {
1019 switch (ioc_status
) {
1021 /****************************************************************************
1022 * Common IOCStatus values for all replies
1023 ****************************************************************************/
1025 case MPI2_IOCSTATUS_INVALID_FUNCTION
:
1026 desc
= "invalid function";
1028 case MPI2_IOCSTATUS_BUSY
:
1031 case MPI2_IOCSTATUS_INVALID_SGL
:
1032 desc
= "invalid sgl";
1034 case MPI2_IOCSTATUS_INTERNAL_ERROR
:
1035 desc
= "internal error";
1037 case MPI2_IOCSTATUS_INVALID_VPID
:
1038 desc
= "invalid vpid";
1040 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES
:
1041 desc
= "insufficient resources";
1043 case MPI2_IOCSTATUS_INSUFFICIENT_POWER
:
1044 desc
= "insufficient power";
1046 case MPI2_IOCSTATUS_INVALID_FIELD
:
1047 desc
= "invalid field";
1049 case MPI2_IOCSTATUS_INVALID_STATE
:
1050 desc
= "invalid state";
1052 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED
:
1053 desc
= "op state not supported";
1056 /****************************************************************************
1057 * Config IOCStatus values
1058 ****************************************************************************/
1060 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION
:
1061 desc
= "config invalid action";
1063 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE
:
1064 desc
= "config invalid type";
1066 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE
:
1067 desc
= "config invalid page";
1069 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA
:
1070 desc
= "config invalid data";
1072 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS
:
1073 desc
= "config no defaults";
1075 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT
:
1076 desc
= "config can't commit";
1079 /****************************************************************************
1081 ****************************************************************************/
1083 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR
:
1084 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE
:
1085 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE
:
1086 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN
:
1087 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN
:
1088 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR
:
1089 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR
:
1090 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED
:
1091 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH
:
1092 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED
:
1093 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED
:
1094 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED
:
1097 /****************************************************************************
1098 * For use by SCSI Initiator and SCSI Target end-to-end data protection
1099 ****************************************************************************/
1101 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR
:
1102 desc
= "eedp guard error";
1104 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR
:
1105 desc
= "eedp ref tag error";
1107 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR
:
1108 desc
= "eedp app tag error";
1111 /****************************************************************************
1112 * SCSI Target values
1113 ****************************************************************************/
1115 case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX
:
1116 desc
= "target invalid io index";
1118 case MPI2_IOCSTATUS_TARGET_ABORTED
:
1119 desc
= "target aborted";
1121 case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE
:
1122 desc
= "target no conn retryable";
1124 case MPI2_IOCSTATUS_TARGET_NO_CONNECTION
:
1125 desc
= "target no connection";
1127 case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH
:
1128 desc
= "target xfer count mismatch";
1130 case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR
:
1131 desc
= "target data offset error";
1133 case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA
:
1134 desc
= "target too much write data";
1136 case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT
:
1137 desc
= "target iu too short";
1139 case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT
:
1140 desc
= "target ack nak timeout";
1142 case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED
:
1143 desc
= "target nak received";
1146 /****************************************************************************
1147 * Serial Attached SCSI values
1148 ****************************************************************************/
1150 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED
:
1151 desc
= "smp request failed";
1153 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN
:
1154 desc
= "smp data overrun";
1157 /****************************************************************************
1158 * Diagnostic Buffer Post / Diagnostic Release values
1159 ****************************************************************************/
1161 case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
:
1162 desc
= "diagnostic released";
1171 switch (request_hdr
->Function
) {
1172 case MPI2_FUNCTION_CONFIG
:
1173 frame_sz
= sizeof(Mpi2ConfigRequest_t
) + ioc
->sge_size
;
1174 func_str
= "config_page";
1176 case MPI2_FUNCTION_SCSI_TASK_MGMT
:
1177 frame_sz
= sizeof(Mpi2SCSITaskManagementRequest_t
);
1178 func_str
= "task_mgmt";
1180 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL
:
1181 frame_sz
= sizeof(Mpi2SasIoUnitControlRequest_t
);
1182 func_str
= "sas_iounit_ctl";
1184 case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR
:
1185 frame_sz
= sizeof(Mpi2SepRequest_t
);
1186 func_str
= "enclosure";
1188 case MPI2_FUNCTION_IOC_INIT
:
1189 frame_sz
= sizeof(Mpi2IOCInitRequest_t
);
1190 func_str
= "ioc_init";
1192 case MPI2_FUNCTION_PORT_ENABLE
:
1193 frame_sz
= sizeof(Mpi2PortEnableRequest_t
);
1194 func_str
= "port_enable";
1196 case MPI2_FUNCTION_SMP_PASSTHROUGH
:
1197 frame_sz
= sizeof(Mpi2SmpPassthroughRequest_t
) + ioc
->sge_size
;
1198 func_str
= "smp_passthru";
1200 case MPI2_FUNCTION_NVME_ENCAPSULATED
:
1201 frame_sz
= sizeof(Mpi26NVMeEncapsulatedRequest_t
) +
1203 func_str
= "nvme_encapsulated";
1207 func_str
= "unknown";
1211 ioc_warn(ioc
, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
1212 desc
, ioc_status
, request_hdr
, func_str
);
1214 _debug_dump_mf(request_hdr
, frame_sz
/4);
1218 * _base_display_event_data - verbose translation of firmware asyn events
1219 * @ioc: per adapter object
1220 * @mpi_reply: reply mf payload returned from firmware
1223 _base_display_event_data(struct MPT3SAS_ADAPTER
*ioc
,
1224 Mpi2EventNotificationReply_t
*mpi_reply
)
1229 if (!(ioc
->logging_level
& MPT_DEBUG_EVENTS
))
1232 event
= le16_to_cpu(mpi_reply
->Event
);
1235 case MPI2_EVENT_LOG_DATA
:
1238 case MPI2_EVENT_STATE_CHANGE
:
1239 desc
= "Status Change";
1241 case MPI2_EVENT_HARD_RESET_RECEIVED
:
1242 desc
= "Hard Reset Received";
1244 case MPI2_EVENT_EVENT_CHANGE
:
1245 desc
= "Event Change";
1247 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE
:
1248 desc
= "Device Status Change";
1250 case MPI2_EVENT_IR_OPERATION_STATUS
:
1251 if (!ioc
->hide_ir_msg
)
1252 desc
= "IR Operation Status";
1254 case MPI2_EVENT_SAS_DISCOVERY
:
1256 Mpi2EventDataSasDiscovery_t
*event_data
=
1257 (Mpi2EventDataSasDiscovery_t
*)mpi_reply
->EventData
;
1258 ioc_info(ioc
, "Discovery: (%s)",
1259 event_data
->ReasonCode
== MPI2_EVENT_SAS_DISC_RC_STARTED
?
1261 if (event_data
->DiscoveryStatus
)
1262 pr_cont(" discovery_status(0x%08x)",
1263 le32_to_cpu(event_data
->DiscoveryStatus
));
1267 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE
:
1268 desc
= "SAS Broadcast Primitive";
1270 case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE
:
1271 desc
= "SAS Init Device Status Change";
1273 case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW
:
1274 desc
= "SAS Init Table Overflow";
1276 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST
:
1277 desc
= "SAS Topology Change List";
1279 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
:
1280 desc
= "SAS Enclosure Device Status Change";
1282 case MPI2_EVENT_IR_VOLUME
:
1283 if (!ioc
->hide_ir_msg
)
1286 case MPI2_EVENT_IR_PHYSICAL_DISK
:
1287 if (!ioc
->hide_ir_msg
)
1288 desc
= "IR Physical Disk";
1290 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST
:
1291 if (!ioc
->hide_ir_msg
)
1292 desc
= "IR Configuration Change List";
1294 case MPI2_EVENT_LOG_ENTRY_ADDED
:
1295 if (!ioc
->hide_ir_msg
)
1296 desc
= "Log Entry Added";
1298 case MPI2_EVENT_TEMP_THRESHOLD
:
1299 desc
= "Temperature Threshold";
1301 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION
:
1302 desc
= "Cable Event";
1304 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR
:
1305 desc
= "SAS Device Discovery Error";
1307 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE
:
1308 desc
= "PCIE Device Status Change";
1310 case MPI2_EVENT_PCIE_ENUMERATION
:
1312 Mpi26EventDataPCIeEnumeration_t
*event_data
=
1313 (Mpi26EventDataPCIeEnumeration_t
*)mpi_reply
->EventData
;
1314 ioc_info(ioc
, "PCIE Enumeration: (%s)",
1315 event_data
->ReasonCode
== MPI26_EVENT_PCIE_ENUM_RC_STARTED
?
1317 if (event_data
->EnumerationStatus
)
1318 pr_cont("enumeration_status(0x%08x)",
1319 le32_to_cpu(event_data
->EnumerationStatus
));
1323 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST
:
1324 desc
= "PCIE Topology Change List";
1331 ioc_info(ioc
, "%s\n", desc
);
1335 * _base_sas_log_info - verbose translation of firmware log info
1336 * @ioc: per adapter object
1337 * @log_info: log info
1340 _base_sas_log_info(struct MPT3SAS_ADAPTER
*ioc
, u32 log_info
)
1342 union loginfo_type
{
1351 union loginfo_type sas_loginfo
;
1352 char *originator_str
= NULL
;
1354 sas_loginfo
.loginfo
= log_info
;
1355 if (sas_loginfo
.dw
.bus_type
!= 3 /*SAS*/)
1358 /* each nexus loss loginfo */
1359 if (log_info
== 0x31170000)
1362 /* eat the loginfos associated with task aborts */
1363 if (ioc
->ignore_loginfos
&& (log_info
== 0x30050000 || log_info
==
1364 0x31140000 || log_info
== 0x31130000))
1367 switch (sas_loginfo
.dw
.originator
) {
1369 originator_str
= "IOP";
1372 originator_str
= "PL";
1375 if (!ioc
->hide_ir_msg
)
1376 originator_str
= "IR";
1378 originator_str
= "WarpDrive";
1382 ioc_warn(ioc
, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
1384 originator_str
, sas_loginfo
.dw
.code
, sas_loginfo
.dw
.subcode
);
1388 * _base_display_reply_info - handle reply descriptors depending on IOC Status
1389 * @ioc: per adapter object
1390 * @smid: system request message index
1391 * @msix_index: MSIX table index supplied by the OS
1392 * @reply: reply message frame (lower 32bit addr)
1395 _base_display_reply_info(struct MPT3SAS_ADAPTER
*ioc
, u16 smid
, u8 msix_index
,
1398 MPI2DefaultReply_t
*mpi_reply
;
1402 mpi_reply
= mpt3sas_base_get_reply_virt_addr(ioc
, reply
);
1403 if (unlikely(!mpi_reply
)) {
1404 ioc_err(ioc
, "mpi_reply not valid at %s:%d/%s()!\n",
1405 __FILE__
, __LINE__
, __func__
);
1408 ioc_status
= le16_to_cpu(mpi_reply
->IOCStatus
);
1410 if ((ioc_status
& MPI2_IOCSTATUS_MASK
) &&
1411 (ioc
->logging_level
& MPT_DEBUG_REPLY
)) {
1412 _base_sas_ioc_info(ioc
, mpi_reply
,
1413 mpt3sas_base_get_msg_frame(ioc
, smid
));
1416 if (ioc_status
& MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE
) {
1417 loginfo
= le32_to_cpu(mpi_reply
->IOCLogInfo
);
1418 _base_sas_log_info(ioc
, loginfo
);
1421 if (ioc_status
|| loginfo
) {
1422 ioc_status
&= MPI2_IOCSTATUS_MASK
;
1423 mpt3sas_trigger_mpi(ioc
, ioc_status
, loginfo
);
1428 * mpt3sas_base_done - base internal command completion routine
1429 * @ioc: per adapter object
1430 * @smid: system request message index
1431 * @msix_index: MSIX table index supplied by the OS
1432 * @reply: reply message frame(lower 32bit addr)
1435 * 1 meaning mf should be freed from _base_interrupt
1436 * 0 means the mf is freed from this function.
1439 mpt3sas_base_done(struct MPT3SAS_ADAPTER
*ioc
, u16 smid
, u8 msix_index
,
1442 MPI2DefaultReply_t
*mpi_reply
;
1444 mpi_reply
= mpt3sas_base_get_reply_virt_addr(ioc
, reply
);
1445 if (mpi_reply
&& mpi_reply
->Function
== MPI2_FUNCTION_EVENT_ACK
)
1446 return mpt3sas_check_for_pending_internal_cmds(ioc
, smid
);
1448 if (ioc
->base_cmds
.status
== MPT3_CMD_NOT_USED
)
1451 ioc
->base_cmds
.status
|= MPT3_CMD_COMPLETE
;
1453 ioc
->base_cmds
.status
|= MPT3_CMD_REPLY_VALID
;
1454 memcpy(ioc
->base_cmds
.reply
, mpi_reply
, mpi_reply
->MsgLength
*4);
1456 ioc
->base_cmds
.status
&= ~MPT3_CMD_PENDING
;
1458 complete(&ioc
->base_cmds
.done
);
1463 * _base_async_event - main callback handler for firmware asyn events
1464 * @ioc: per adapter object
1465 * @msix_index: MSIX table index supplied by the OS
1466 * @reply: reply message frame(lower 32bit addr)
1469 * 1 meaning mf should be freed from _base_interrupt
1470 * 0 means the mf is freed from this function.
1473 _base_async_event(struct MPT3SAS_ADAPTER
*ioc
, u8 msix_index
, u32 reply
)
1475 Mpi2EventNotificationReply_t
*mpi_reply
;
1476 Mpi2EventAckRequest_t
*ack_request
;
1478 struct _event_ack_list
*delayed_event_ack
;
1480 mpi_reply
= mpt3sas_base_get_reply_virt_addr(ioc
, reply
);
1483 if (mpi_reply
->Function
!= MPI2_FUNCTION_EVENT_NOTIFICATION
)
1486 _base_display_event_data(ioc
, mpi_reply
);
1488 if (!(mpi_reply
->AckRequired
& MPI2_EVENT_NOTIFICATION_ACK_REQUIRED
))
1490 smid
= mpt3sas_base_get_smid(ioc
, ioc
->base_cb_idx
);
1492 delayed_event_ack
= kzalloc(sizeof(*delayed_event_ack
),
1494 if (!delayed_event_ack
)
1496 INIT_LIST_HEAD(&delayed_event_ack
->list
);
1497 delayed_event_ack
->Event
= mpi_reply
->Event
;
1498 delayed_event_ack
->EventContext
= mpi_reply
->EventContext
;
1499 list_add_tail(&delayed_event_ack
->list
,
1500 &ioc
->delayed_event_ack_list
);
1502 ioc_info(ioc
, "DELAYED: EVENT ACK: event (0x%04x)\n",
1503 le16_to_cpu(mpi_reply
->Event
)));
1507 ack_request
= mpt3sas_base_get_msg_frame(ioc
, smid
);
1508 memset(ack_request
, 0, sizeof(Mpi2EventAckRequest_t
));
1509 ack_request
->Function
= MPI2_FUNCTION_EVENT_ACK
;
1510 ack_request
->Event
= mpi_reply
->Event
;
1511 ack_request
->EventContext
= mpi_reply
->EventContext
;
1512 ack_request
->VF_ID
= 0; /* TODO */
1513 ack_request
->VP_ID
= 0;
1514 ioc
->put_smid_default(ioc
, smid
);
1518 /* scsih callback handler */
1519 mpt3sas_scsih_event_callback(ioc
, msix_index
, reply
);
1521 /* ctl callback handler */
1522 mpt3sas_ctl_event_callback(ioc
, msix_index
, reply
);
1527 static struct scsiio_tracker
*
1528 _get_st_from_smid(struct MPT3SAS_ADAPTER
*ioc
, u16 smid
)
1530 struct scsi_cmnd
*cmd
;
1532 if (WARN_ON(!smid
) ||
1533 WARN_ON(smid
>= ioc
->hi_priority_smid
))
1536 cmd
= mpt3sas_scsih_scsi_lookup_get(ioc
, smid
);
1538 return scsi_cmd_priv(cmd
);
1544 * _base_get_cb_idx - obtain the callback index
1545 * @ioc: per adapter object
1546 * @smid: system request message index
1548 * Return: callback index.
1551 _base_get_cb_idx(struct MPT3SAS_ADAPTER
*ioc
, u16 smid
)
1554 u16 ctl_smid
= ioc
->scsiio_depth
- INTERNAL_SCSIIO_CMDS_COUNT
+ 1;
1557 if (smid
< ioc
->hi_priority_smid
) {
1558 struct scsiio_tracker
*st
;
1560 if (smid
< ctl_smid
) {
1561 st
= _get_st_from_smid(ioc
, smid
);
1563 cb_idx
= st
->cb_idx
;
1564 } else if (smid
== ctl_smid
)
1565 cb_idx
= ioc
->ctl_cb_idx
;
1566 } else if (smid
< ioc
->internal_smid
) {
1567 i
= smid
- ioc
->hi_priority_smid
;
1568 cb_idx
= ioc
->hpr_lookup
[i
].cb_idx
;
1569 } else if (smid
<= ioc
->hba_queue_depth
) {
1570 i
= smid
- ioc
->internal_smid
;
1571 cb_idx
= ioc
->internal_lookup
[i
].cb_idx
;
1577 * mpt3sas_base_pause_mq_polling - pause polling on the mq poll queues
1578 * when driver is flushing out the IOs.
1579 * @ioc: per adapter object
1581 * Pause polling on the mq poll (io uring) queues when driver is flushing
1582 * out the IOs. Otherwise we may see the race condition of completing the same
1583 * IO from two paths.
1588 mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER
*ioc
)
1590 int iopoll_q_count
=
1591 ioc
->reply_queue_count
- ioc
->iopoll_q_start_index
;
1594 for (qid
= 0; qid
< iopoll_q_count
; qid
++)
1595 atomic_set(&ioc
->io_uring_poll_queues
[qid
].pause
, 1);
1598 * wait for current poll to complete.
1600 for (qid
= 0; qid
< iopoll_q_count
; qid
++) {
1601 while (atomic_read(&ioc
->io_uring_poll_queues
[qid
].busy
)) {
1609 * mpt3sas_base_resume_mq_polling - Resume polling on mq poll queues.
1610 * @ioc: per adapter object
1615 mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER
*ioc
)
1617 int iopoll_q_count
=
1618 ioc
->reply_queue_count
- ioc
->iopoll_q_start_index
;
1621 for (qid
= 0; qid
< iopoll_q_count
; qid
++)
1622 atomic_set(&ioc
->io_uring_poll_queues
[qid
].pause
, 0);
1626 * mpt3sas_base_mask_interrupts - disable interrupts
1627 * @ioc: per adapter object
1629 * Disabling ResetIRQ, Reply and Doorbell Interrupts
1632 mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER
*ioc
)
1636 ioc
->mask_interrupts
= 1;
1637 him_register
= ioc
->base_readl(&ioc
->chip
->HostInterruptMask
);
1638 him_register
|= MPI2_HIM_DIM
+ MPI2_HIM_RIM
+ MPI2_HIM_RESET_IRQ_MASK
;
1639 writel(him_register
, &ioc
->chip
->HostInterruptMask
);
1640 ioc
->base_readl(&ioc
->chip
->HostInterruptMask
);
1644 * mpt3sas_base_unmask_interrupts - enable interrupts
1645 * @ioc: per adapter object
1647 * Enabling only Reply Interrupts
1650 mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER
*ioc
)
1654 him_register
= ioc
->base_readl(&ioc
->chip
->HostInterruptMask
);
1655 him_register
&= ~MPI2_HIM_RIM
;
1656 writel(him_register
, &ioc
->chip
->HostInterruptMask
);
1657 ioc
->mask_interrupts
= 0;
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};

static u32 base_mod64(u64 dividend, u32 divisor)
{
	u32 remainder;

	if (!divisor)
		pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
	remainder = do_div(dividend, divisor);
	return remainder;
}
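
/*
 * Usage note (illustrative, not from the original source): do_div() divides a
 * 64-bit dividend by a 32-bit divisor in place and returns the remainder, so
 * base_mod64(10000000000ULL, 3) evaluates to 1, since
 * 10000000000 = 3 * 3333333333 + 1.
 */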
1679 * _base_process_reply_queue - Process reply descriptors from reply
1680 * descriptor post queue.
1681 * @reply_q: per IRQ's reply queue object.
1683 * Return: number of reply descriptors processed from reply
1687 _base_process_reply_queue(struct adapter_reply_queue
*reply_q
)
1689 union reply_descriptor rd
;
1691 u8 request_descript_type
;
1695 u8 msix_index
= reply_q
->msix_index
;
1696 struct MPT3SAS_ADAPTER
*ioc
= reply_q
->ioc
;
1697 Mpi2ReplyDescriptorsUnion_t
*rpf
;
1701 if (!atomic_add_unless(&reply_q
->busy
, 1, 1))
1702 return completed_cmds
;
1704 rpf
= &reply_q
->reply_post_free
[reply_q
->reply_post_host_index
];
1705 request_descript_type
= rpf
->Default
.ReplyFlags
1706 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK
;
1707 if (request_descript_type
== MPI2_RPY_DESCRIPT_FLAGS_UNUSED
) {
1708 atomic_dec(&reply_q
->busy
);
1709 return completed_cmds
;
1714 rd
.word
= le64_to_cpu(rpf
->Words
);
1715 if (rd
.u
.low
== UINT_MAX
|| rd
.u
.high
== UINT_MAX
)
1718 smid
= le16_to_cpu(rpf
->Default
.DescriptorTypeDependent1
);
1719 if (request_descript_type
==
1720 MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS
||
1721 request_descript_type
==
1722 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS
||
1723 request_descript_type
==
1724 MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS
) {
1725 cb_idx
= _base_get_cb_idx(ioc
, smid
);
1726 if ((likely(cb_idx
< MPT_MAX_CALLBACKS
)) &&
1727 (likely(mpt_callbacks
[cb_idx
] != NULL
))) {
1728 rc
= mpt_callbacks
[cb_idx
](ioc
, smid
,
1731 mpt3sas_base_free_smid(ioc
, smid
);
1733 } else if (request_descript_type
==
1734 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY
) {
1735 reply
= le32_to_cpu(
1736 rpf
->AddressReply
.ReplyFrameAddress
);
1737 if (reply
> ioc
->reply_dma_max_address
||
1738 reply
< ioc
->reply_dma_min_address
)
1741 cb_idx
= _base_get_cb_idx(ioc
, smid
);
1742 if ((likely(cb_idx
< MPT_MAX_CALLBACKS
)) &&
1743 (likely(mpt_callbacks
[cb_idx
] != NULL
))) {
1744 rc
= mpt_callbacks
[cb_idx
](ioc
, smid
,
1747 _base_display_reply_info(ioc
,
1748 smid
, msix_index
, reply
);
1750 mpt3sas_base_free_smid(ioc
,
1754 _base_async_event(ioc
, msix_index
, reply
);
1757 /* reply free queue handling */
1759 ioc
->reply_free_host_index
=
1760 (ioc
->reply_free_host_index
==
1761 (ioc
->reply_free_queue_depth
- 1)) ?
1762 0 : ioc
->reply_free_host_index
+ 1;
1763 ioc
->reply_free
[ioc
->reply_free_host_index
] =
1765 if (ioc
->is_mcpu_endpoint
)
1766 _base_clone_reply_to_sys_mem(ioc
,
1768 ioc
->reply_free_host_index
);
1769 writel(ioc
->reply_free_host_index
,
1770 &ioc
->chip
->ReplyFreeHostIndex
);
1774 rpf
->Words
= cpu_to_le64(ULLONG_MAX
);
1775 reply_q
->reply_post_host_index
=
1776 (reply_q
->reply_post_host_index
==
1777 (ioc
->reply_post_queue_depth
- 1)) ? 0 :
1778 reply_q
->reply_post_host_index
+ 1;
1779 request_descript_type
=
1780 reply_q
->reply_post_free
[reply_q
->reply_post_host_index
].
1781 Default
.ReplyFlags
& MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK
;
1783 /* Update the reply post host index after continuously
1784 * processing the threshold number of Reply Descriptors.
1785 * So that FW can find enough entries to post the Reply
1786 * Descriptors in the reply descriptor post queue.
1788 if (completed_cmds
>= ioc
->thresh_hold
) {
1789 if (ioc
->combined_reply_queue
) {
1790 writel(reply_q
->reply_post_host_index
|
1791 ((msix_index
& 7) <<
1792 MPI2_RPHI_MSIX_INDEX_SHIFT
),
1793 ioc
->replyPostRegisterIndex
[msix_index
/8]);
1795 writel(reply_q
->reply_post_host_index
|
1797 MPI2_RPHI_MSIX_INDEX_SHIFT
),
1798 &ioc
->chip
->ReplyPostHostIndex
);
1800 if (!reply_q
->is_iouring_poll_q
&&
1801 !reply_q
->irq_poll_scheduled
) {
1802 reply_q
->irq_poll_scheduled
= true;
1803 irq_poll_sched(&reply_q
->irqpoll
);
1805 atomic_dec(&reply_q
->busy
);
1806 return completed_cmds
;
1808 if (request_descript_type
== MPI2_RPY_DESCRIPT_FLAGS_UNUSED
)
1810 if (!reply_q
->reply_post_host_index
)
1811 rpf
= reply_q
->reply_post_free
;
1818 if (!completed_cmds
) {
1819 atomic_dec(&reply_q
->busy
);
1820 return completed_cmds
;
1823 if (ioc
->is_warpdrive
) {
1824 writel(reply_q
->reply_post_host_index
,
1825 ioc
->reply_post_host_index
[msix_index
]);
1826 atomic_dec(&reply_q
->busy
);
1827 return completed_cmds
;
1830 /* Update Reply Post Host Index.
1831 * For those HBA's which support combined reply queue feature
1832 * 1. Get the correct Supplemental Reply Post Host Index Register.
1833 * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
1834 * Index Register address bank i.e replyPostRegisterIndex[],
1835 * 2. Then update this register with new reply host index value
1836 * in ReplyPostIndex field and the MSIxIndex field with
1837 * msix_index value reduced to a value between 0 and 7,
1838 * using a modulo 8 operation. Since each Supplemental Reply Post
1839 * Host Index Register supports 8 MSI-X vectors.
1841 * For other HBA's just update the Reply Post Host Index register with
1842 * new reply host index value in ReplyPostIndex Field and msix_index
1843 * value in MSIxIndex field.
1845 if (ioc
->combined_reply_queue
)
1846 writel(reply_q
->reply_post_host_index
| ((msix_index
& 7) <<
1847 MPI2_RPHI_MSIX_INDEX_SHIFT
),
1848 ioc
->replyPostRegisterIndex
[msix_index
/8]);
1850 writel(reply_q
->reply_post_host_index
| (msix_index
<<
1851 MPI2_RPHI_MSIX_INDEX_SHIFT
),
1852 &ioc
->chip
->ReplyPostHostIndex
);
1853 atomic_dec(&reply_q
->busy
);
1854 return completed_cmds
;
1858 * mpt3sas_blk_mq_poll - poll the blk mq poll queue
1859 * @shost: Scsi_Host object
1860 * @queue_num: hw ctx queue number
1862 * Return number of entries that has been processed from poll queue.
1864 int mpt3sas_blk_mq_poll(struct Scsi_Host
*shost
, unsigned int queue_num
)
1866 struct MPT3SAS_ADAPTER
*ioc
=
1867 (struct MPT3SAS_ADAPTER
*)shost
->hostdata
;
1868 struct adapter_reply_queue
*reply_q
;
1869 int num_entries
= 0;
1870 int qid
= queue_num
- ioc
->iopoll_q_start_index
;
1872 if (atomic_read(&ioc
->io_uring_poll_queues
[qid
].pause
) ||
1873 !atomic_add_unless(&ioc
->io_uring_poll_queues
[qid
].busy
, 1, 1))
1876 reply_q
= ioc
->io_uring_poll_queues
[qid
].reply_q
;
1878 num_entries
= _base_process_reply_queue(reply_q
);
1879 atomic_dec(&ioc
->io_uring_poll_queues
[qid
].busy
);
1885 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
1886 * @irq: irq number (not used)
1887 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
1889 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
1892 _base_interrupt(int irq
, void *bus_id
)
1894 struct adapter_reply_queue
*reply_q
= bus_id
;
1895 struct MPT3SAS_ADAPTER
*ioc
= reply_q
->ioc
;
1897 if (ioc
->mask_interrupts
)
1899 if (reply_q
->irq_poll_scheduled
)
1901 return ((_base_process_reply_queue(reply_q
) > 0) ?
1902 IRQ_HANDLED
: IRQ_NONE
);
1906 * _base_irqpoll - IRQ poll callback handler
1907 * @irqpoll: irq_poll object
1908 * @budget: irq poll weight
1910 * Return: number of reply descriptors processed
1913 _base_irqpoll(struct irq_poll
*irqpoll
, int budget
)
1915 struct adapter_reply_queue
*reply_q
;
1916 int num_entries
= 0;
1918 reply_q
= container_of(irqpoll
, struct adapter_reply_queue
,
1920 if (reply_q
->irq_line_enable
) {
1921 disable_irq_nosync(reply_q
->os_irq
);
1922 reply_q
->irq_line_enable
= false;
1924 num_entries
= _base_process_reply_queue(reply_q
);
1925 if (num_entries
< budget
) {
1926 irq_poll_complete(irqpoll
);
1927 reply_q
->irq_poll_scheduled
= false;
1928 reply_q
->irq_line_enable
= true;
1929 enable_irq(reply_q
->os_irq
);
1931 * Go for one more round of processing the
1932 * reply descriptor post queue in case the HBA
1933 * Firmware has posted some reply descriptors
1934 * while reenabling the IRQ.
1936 _base_process_reply_queue(reply_q
);
1943 * _base_init_irqpolls - initliaze IRQ polls
1944 * @ioc: per adapter object
1949 _base_init_irqpolls(struct MPT3SAS_ADAPTER
*ioc
)
1951 struct adapter_reply_queue
*reply_q
, *next
;
1953 if (list_empty(&ioc
->reply_queue_list
))
1956 list_for_each_entry_safe(reply_q
, next
, &ioc
->reply_queue_list
, list
) {
1957 if (reply_q
->is_iouring_poll_q
)
1959 irq_poll_init(&reply_q
->irqpoll
,
1960 ioc
->hba_queue_depth
/4, _base_irqpoll
);
1961 reply_q
->irq_poll_scheduled
= false;
1962 reply_q
->irq_line_enable
= true;
1963 reply_q
->os_irq
= pci_irq_vector(ioc
->pdev
,
1964 reply_q
->msix_index
);
1969 * _base_is_controller_msix_enabled - is controller support muli-reply queues
1970 * @ioc: per adapter object
1972 * Return: Whether or not MSI/X is enabled.
1975 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER
*ioc
)
1977 return (ioc
->facts
.IOCCapabilities
&
1978 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX
) && ioc
->msix_enable
;
1982 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
1983 * @ioc: per adapter object
1984 * @poll: poll over reply descriptor pools incase interrupt for
1985 * timed-out SCSI command got delayed
1986 * Context: non-ISR context
1988 * Called when a Task Management request has completed.
1991 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER
*ioc
, u8 poll
)
1993 struct adapter_reply_queue
*reply_q
;
1995 /* If MSIX capability is turned off
1996 * then multi-queues are not enabled
1998 if (!_base_is_controller_msix_enabled(ioc
))
2001 list_for_each_entry(reply_q
, &ioc
->reply_queue_list
, list
) {
2002 if (ioc
->shost_recovery
|| ioc
->remove_host
||
2003 ioc
->pci_error_recovery
)
2005 /* TMs are on msix_index == 0 */
2006 if (reply_q
->msix_index
== 0)
2009 if (reply_q
->is_iouring_poll_q
) {
2010 _base_process_reply_queue(reply_q
);
2014 synchronize_irq(pci_irq_vector(ioc
->pdev
, reply_q
->msix_index
));
2015 if (reply_q
->irq_poll_scheduled
) {
2016 /* Calling irq_poll_disable will wait for any pending
2017 * callbacks to have completed.
2019 irq_poll_disable(&reply_q
->irqpoll
);
2020 irq_poll_enable(&reply_q
->irqpoll
);
2021 /* check how the scheduled poll has ended,
2022 * clean up only if necessary
2024 if (reply_q
->irq_poll_scheduled
) {
2025 reply_q
->irq_poll_scheduled
= false;
2026 reply_q
->irq_line_enable
= true;
2027 enable_irq(reply_q
->os_irq
);
2032 _base_process_reply_queue(reply_q
);
2037 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
2038 * @cb_idx: callback index
2041 mpt3sas_base_release_callback_handler(u8 cb_idx
)
2043 mpt_callbacks
[cb_idx
] = NULL
;
2047 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
2048 * @cb_func: callback function
2050 * Return: Index of @cb_func.
2053 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func
)
2057 for (cb_idx
= MPT_MAX_CALLBACKS
-1; cb_idx
; cb_idx
--)
2058 if (mpt_callbacks
[cb_idx
] == NULL
)
2061 mpt_callbacks
[cb_idx
] = cb_func
;
2066 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
2069 mpt3sas_base_initialize_callback_handler(void)
2073 for (cb_idx
= 0; cb_idx
< MPT_MAX_CALLBACKS
; cb_idx
++)
2074 mpt3sas_base_release_callback_handler(cb_idx
);
2079 * _base_build_zero_len_sge - build zero length sg entry
2080 * @ioc: per adapter object
2081 * @paddr: virtual address for SGE
2083 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2084 * something to use if the target device goes brain dead and tries
2085 * to send data even when none is asked for.
2088 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER
*ioc
, void *paddr
)
2090 u32 flags_length
= (u32
)((MPI2_SGE_FLAGS_LAST_ELEMENT
|
2091 MPI2_SGE_FLAGS_END_OF_BUFFER
| MPI2_SGE_FLAGS_END_OF_LIST
|
2092 MPI2_SGE_FLAGS_SIMPLE_ELEMENT
) <<
2093 MPI2_SGE_FLAGS_SHIFT
);
2094 ioc
->base_add_sg_single(paddr
, flags_length
, -1);
2098 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
2099 * @paddr: virtual address for SGE
2100 * @flags_length: SGE flags and data transfer length
2101 * @dma_addr: Physical address
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple32_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le32(dma_addr);
}
2116 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
2117 * @paddr: virtual address for SGE
2118 * @flags_length: SGE flags and data transfer length
2119 * @dma_addr: Physical address
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple64_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le64(dma_addr);
}
2133 * _base_get_chain_buffer_tracker - obtain chain tracker
2134 * @ioc: per adapter object
2135 * @scmd: SCSI commands of the IO request
2137 * Return: chain tracker from chain_lookup table using key as
2138 * smid and smid's chain_offset.
2140 static struct chain_tracker
*
2141 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER
*ioc
,
2142 struct scsi_cmnd
*scmd
)
2144 struct chain_tracker
*chain_req
;
2145 struct scsiio_tracker
*st
= scsi_cmd_priv(scmd
);
2146 u16 smid
= st
->smid
;
2148 atomic_read(&ioc
->chain_lookup
[smid
- 1].chain_offset
);
2150 if (chain_offset
== ioc
->chains_needed_per_io
)
2153 chain_req
= &ioc
->chain_lookup
[smid
- 1].chains_per_smid
[chain_offset
];
2154 atomic_inc(&ioc
->chain_lookup
[smid
- 1].chain_offset
);
2160 * _base_build_sg - build generic sg
2161 * @ioc: per adapter object
2162 * @psge: virtual address for SGE
2163 * @data_out_dma: physical address for WRITES
2164 * @data_out_sz: data xfer size for WRITES
2165 * @data_in_dma: physical address for READS
2166 * @data_in_sz: data xfer size for READS
2169 _base_build_sg(struct MPT3SAS_ADAPTER
*ioc
, void *psge
,
2170 dma_addr_t data_out_dma
, size_t data_out_sz
, dma_addr_t data_in_dma
,
2175 if (!data_out_sz
&& !data_in_sz
) {
2176 _base_build_zero_len_sge(ioc
, psge
);
2180 if (data_out_sz
&& data_in_sz
) {
2181 /* WRITE sgel first */
2182 sgl_flags
= (MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
2183 MPI2_SGE_FLAGS_END_OF_BUFFER
| MPI2_SGE_FLAGS_HOST_TO_IOC
);
2184 sgl_flags
= sgl_flags
<< MPI2_SGE_FLAGS_SHIFT
;
2185 ioc
->base_add_sg_single(psge
, sgl_flags
|
2186 data_out_sz
, data_out_dma
);
2189 psge
+= ioc
->sge_size
;
2191 /* READ sgel last */
2192 sgl_flags
= (MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
2193 MPI2_SGE_FLAGS_LAST_ELEMENT
| MPI2_SGE_FLAGS_END_OF_BUFFER
|
2194 MPI2_SGE_FLAGS_END_OF_LIST
);
2195 sgl_flags
= sgl_flags
<< MPI2_SGE_FLAGS_SHIFT
;
2196 ioc
->base_add_sg_single(psge
, sgl_flags
|
2197 data_in_sz
, data_in_dma
);
2198 } else if (data_out_sz
) /* WRITE */ {
2199 sgl_flags
= (MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
2200 MPI2_SGE_FLAGS_LAST_ELEMENT
| MPI2_SGE_FLAGS_END_OF_BUFFER
|
2201 MPI2_SGE_FLAGS_END_OF_LIST
| MPI2_SGE_FLAGS_HOST_TO_IOC
);
2202 sgl_flags
= sgl_flags
<< MPI2_SGE_FLAGS_SHIFT
;
2203 ioc
->base_add_sg_single(psge
, sgl_flags
|
2204 data_out_sz
, data_out_dma
);
2205 } else if (data_in_sz
) /* READ */ {
2206 sgl_flags
= (MPI2_SGE_FLAGS_SIMPLE_ELEMENT
|
2207 MPI2_SGE_FLAGS_LAST_ELEMENT
| MPI2_SGE_FLAGS_END_OF_BUFFER
|
2208 MPI2_SGE_FLAGS_END_OF_LIST
);
2209 sgl_flags
= sgl_flags
<< MPI2_SGE_FLAGS_SHIFT
;
2210 ioc
->base_add_sg_single(psge
, sgl_flags
|
2211 data_in_sz
, data_in_dma
);
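/*
 * Usage sketch (illustrative): a caller points psge at the SGE field of an
 * already-built request frame and passes the DMA-mapped data-out and/or
 * data-in buffers, e.g.
 *
 *	_base_build_sg(ioc, psge, data_out_dma, data_out_sz,
 *	    data_in_dma, data_in_sz);
 *
 * Passing zero for one size builds a single WRITE or READ SGE, and passing
 * zero for both falls back to the zero-length SGE built above.
 */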
2215 /* IEEE format sgls */
2218 * _base_build_nvme_prp - This function is called for NVMe end devices to build
2219 * a native SGL (NVMe PRP).
2220 * @ioc: per adapter object
2221 * @smid: system request message index for getting associated SGL
2222 * @nvme_encap_request: the NVMe request msg frame pointer
2223 * @data_out_dma: physical address for WRITES
2224 * @data_out_sz: data xfer size for WRITES
2225 * @data_in_dma: physical address for READS
2226 * @data_in_sz: data xfer size for READS
2228 * The native SGL is built starting in the first PRP
2229 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
2230 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
2231 * used to describe a larger data buffer. If the data buffer is too large to
2232 * describe using the two PRP entries inside the NVMe message, then PRP1
2233 * describes the first data memory segment, and PRP2 contains a pointer to a PRP
2234 * list located elsewhere in memory to describe the remaining data memory
2235 * segments. The PRP list will be contiguous.
2237 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
2238 * consists of a list of PRP entries to describe a number of noncontiguous
2239 * physical memory segments as a single memory buffer, just as a SGL does. Note
2240 * however, that this function is only used by the IOCTL call, so the memory
2241 * given will be guaranteed to be contiguous. There is no need to translate
2242 * non-contiguous SGL into a PRP in this case. All PRPs will describe
2243 * contiguous space that is one page size each.
2245 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
2246 * a PRP list pointer or a PRP element, depending upon the command. PRP2
2247 * contains the second PRP element if the memory being described fits within 2
2248 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
2250 * A PRP list pointer contains the address of a PRP list, structured as a linear
2251 * array of PRP entries. Each PRP entry in this list describes a segment of
2254 * Each 64-bit PRP entry comprises an address and an offset field. The address
2255 * always points at the beginning of a 4KB physical memory page, and the offset
2256 * describes where within that 4KB page the memory segment begins. Only the
2257 * first element in a PRP list may contain a non-zero offset, implying that all
2258 * memory segments following the first begin at the start of a 4KB page.
2260 * Each PRP element normally describes 4KB of physical memory, with exceptions
2261 * for the first and last elements in the list. If the memory being described
2262 * by the list begins at a non-zero offset within the first 4KB page, then the
2263 * first PRP element will contain a non-zero offset indicating where the region
2264 * begins within the 4KB page. The last memory segment may end before the end
2265 * of the 4KB segment, depending upon the overall size of the memory being
2266 * described by the PRP list.
2268 * Since PRP entries lack any indication of size, the overall data buffer length
2269 * is used to determine where the end of the data memory buffer is located, and
2270 * how many PRP entries are required to describe it.
2273 _base_build_nvme_prp(struct MPT3SAS_ADAPTER
*ioc
, u16 smid
,
2274 Mpi26NVMeEncapsulatedRequest_t
*nvme_encap_request
,
2275 dma_addr_t data_out_dma
, size_t data_out_sz
, dma_addr_t data_in_dma
,
2278 int prp_size
= NVME_PRP_SIZE
;
2279 __le64
*prp_entry
, *prp1_entry
, *prp2_entry
;
2281 dma_addr_t prp_entry_dma
, prp_page_dma
, dma_addr
;
2282 u32 offset
, entry_len
;
2283 u32 page_mask_result
, page_mask
;
2285 struct mpt3sas_nvme_cmd
*nvme_cmd
=
2286 (void *)nvme_encap_request
->NVMe_Command
;
2289 * Not all commands require a data transfer. If no data, just return
2290 * without constructing any PRP.
2292 if (!data_in_sz
&& !data_out_sz
)
2294 prp1_entry
= &nvme_cmd
->prp1
;
2295 prp2_entry
= &nvme_cmd
->prp2
;
2296 prp_entry
= prp1_entry
;
2298 * For the PRP entries, use the specially allocated buffer of
2299 * contiguous memory.
2301 prp_page
= (__le64
*)mpt3sas_base_get_pcie_sgl(ioc
, smid
);
2302 prp_page_dma
= mpt3sas_base_get_pcie_sgl_dma(ioc
, smid
);
2305 * Check if we are within 1 entry of a page boundary; we don't
2306 * want our first entry to be a PRP List entry.
2308 page_mask
= ioc
->page_size
- 1;
2309 page_mask_result
= (uintptr_t)((u8
*)prp_page
+ prp_size
) & page_mask
;
2310 if (!page_mask_result
) {
2311 /* Bump up to next page boundary. */
2312 prp_page
= (__le64
*)((u8
*)prp_page
+ prp_size
);
2313 prp_page_dma
= prp_page_dma
+ prp_size
;
2317 * Set PRP physical pointer, which initially points to the current PRP
2320 prp_entry_dma
= prp_page_dma
;
2322 /* Get physical address and length of the data buffer. */
2324 dma_addr
= data_in_dma
;
2325 length
= data_in_sz
;
2327 dma_addr
= data_out_dma
;
2328 length
= data_out_sz
;
2331 /* Loop while the length is not zero. */
2334 * Check if we need to put a list pointer here if we are at
2335 * page boundary - prp_size (8 bytes).
2337 page_mask_result
= (prp_entry_dma
+ prp_size
) & page_mask
;
2338 if (!page_mask_result
) {
2340 * This is the last entry in a PRP List, so we need to
2341 * put a PRP list pointer here. What this does is:
2342 * - bump the current memory pointer to the next
2343 * address, which will be the next full page.
2344 * - set the PRP Entry to point to that page. This
2345 * is now the PRP List pointer.
2346 * - bump the PRP Entry pointer to the start of the
2347 * next page. Since all of this PRP memory is
2348 * contiguous, no need to get a new page - it's
2349 * just the next address.
2352 *prp_entry
= cpu_to_le64(prp_entry_dma
);
2356 /* Need to handle if entry will be part of a page. */
2357 offset
= dma_addr
& page_mask
;
2358 entry_len
= ioc
->page_size
- offset
;
2360 if (prp_entry
== prp1_entry
) {
2362 * Must fill in the first PRP pointer (PRP1) before
2365 *prp1_entry
= cpu_to_le64(dma_addr
);
2368 * Now point to the second PRP entry within the
2371 prp_entry
= prp2_entry
;
2372 } else if (prp_entry
== prp2_entry
) {
2374 * Should the PRP2 entry be a PRP List pointer or just
2375 * a regular PRP pointer? If there is more than one
2376 * more page of data, must use a PRP List pointer.
2378 if (length
> ioc
->page_size
) {
2380 * PRP2 will contain a PRP List pointer because
2381 * more PRP's are needed with this command. The
2382 * list will start at the beginning of the
2383 * contiguous buffer.
2385 *prp2_entry
= cpu_to_le64(prp_entry_dma
);
2388 * The next PRP Entry will be the start of the
2391 prp_entry
= prp_page
;
2394 * After this, the PRP Entries are complete.
2395 * This command uses 2 PRP's and no PRP list.
2397 *prp2_entry
= cpu_to_le64(dma_addr
);
2401 * Put entry in list and bump the addresses.
2403 * After PRP1 and PRP2 are filled in, this will fill in
2404 * all remaining PRP entries in a PRP List, one per
2405 * each time through the loop.
2407 *prp_entry
= cpu_to_le64(dma_addr
);
2413 * Bump the phys address of the command's data buffer by the
2416 dma_addr
+= entry_len
;
2418 /* Decrement length accounting for last partial page. */
2419 if (entry_len
> length
)
2422 length
-= entry_len
;
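/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * the PRP sizing rule described above, i.e. the first entry covers the
 * bytes from the buffer's starting offset to the end of its page and every
 * following entry covers one full page.
 */
static inline u32 prp_entries_needed(u32 length, u32 offset, u32 page_size)
{
	/* one entry for the leading (possibly partial) page ... */
	u32 first = min(length, page_size - offset);

	/* ... plus one entry per remaining page, rounded up */
	return 1 + DIV_ROUND_UP(length - first, page_size);
}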
2427 * base_make_prp_nvme - Prepare PRPs (Physical Region Page) -
2428 * SGLs specific to NVMe drives only
2430 * @ioc: per adapter object
2431 * @scmd: SCSI command from the mid-layer
2432 * @mpi_request: mpi request
2434 * @sge_count: scatter gather element count.
2436 * Return: true: PRPs are built
2437 * false: IEEE SGLs needs to be built
2440 base_make_prp_nvme(struct MPT3SAS_ADAPTER
*ioc
,
2441 struct scsi_cmnd
*scmd
,
2442 Mpi25SCSIIORequest_t
*mpi_request
,
2443 u16 smid
, int sge_count
)
2445 int sge_len
, num_prp_in_chain
= 0;
2446 Mpi25IeeeSgeChain64_t
*main_chain_element
, *ptr_first_sgl
;
2448 dma_addr_t msg_dma
, sge_addr
, offset
;
2449 u32 page_mask
, page_mask_result
;
2450 struct scatterlist
*sg_scmd
;
2452 int data_len
= scsi_bufflen(scmd
);
2455 nvme_pg_size
= max_t(u32
, ioc
->page_size
, NVME_PRP_PAGE_SIZE
);
2457 * NVMe has a very convoluted PRP format. One PRP is required
2458 * for each page or partial page. The driver needs to split up OS sg_list
2459 * entries if they are longer than one page or cross a page
2460 * boundary. The driver also has to insert a PRP list pointer entry as
2461 * the last entry in each physical page of the PRP list.
2463 * NOTE: The first PRP "entry" is actually placed in the first
2464 * SGL entry in the main message as IEEE 64 format. The 2nd
2465 * entry in the main message is the chain element, and the rest
2466 * of the PRP entries are built in the contiguous pcie buffer.
2468 page_mask
= nvme_pg_size
- 1;
2471 * Native SGL is needed.
2472 * Put a chain element in main message frame that points to the first
2475 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
2479 /* Set main message chain element pointer */
2480 main_chain_element
= (pMpi25IeeeSgeChain64_t
)&mpi_request
->SGL
;
2482 * For NVMe the chain element needs to be the 2nd SG entry in the main
2485 main_chain_element
= (Mpi25IeeeSgeChain64_t
*)
2486 ((u8
*)main_chain_element
+ sizeof(MPI25_IEEE_SGE_CHAIN64
));
2489 * For the PRP entries, use the specially allocated buffer of
2490 * contiguous memory. Normal chain buffers can't be used
2491 * because each chain buffer would need to be the size of an OS
2494 curr_buff
= mpt3sas_base_get_pcie_sgl(ioc
, smid
);
2495 msg_dma
= mpt3sas_base_get_pcie_sgl_dma(ioc
, smid
);
2497 main_chain_element
->Address
= cpu_to_le64(msg_dma
);
2498 main_chain_element
->NextChainOffset
= 0;
2499 main_chain_element
->Flags
= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
|
2500 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR
|
2501 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP
;
2503 /* Build first prp, sge need not to be page aligned*/
2504 ptr_first_sgl
= (pMpi25IeeeSgeChain64_t
)&mpi_request
->SGL
;
2505 sg_scmd
= scsi_sglist(scmd
);
2506 sge_addr
= sg_dma_address(sg_scmd
);
2507 sge_len
= sg_dma_len(sg_scmd
);
2509 offset
= sge_addr
& page_mask
;
2510 first_prp_len
= nvme_pg_size
- offset
;
2512 ptr_first_sgl
->Address
= cpu_to_le64(sge_addr
);
2513 ptr_first_sgl
->Length
= cpu_to_le32(first_prp_len
);
2515 data_len
-= first_prp_len
;
2517 if (sge_len
> first_prp_len
) {
2518 sge_addr
+= first_prp_len
;
2519 sge_len
-= first_prp_len
;
2520 } else if (data_len
&& (sge_len
== first_prp_len
)) {
2521 sg_scmd
= sg_next(sg_scmd
);
2522 sge_addr
= sg_dma_address(sg_scmd
);
2523 sge_len
= sg_dma_len(sg_scmd
);
2527 offset
= sge_addr
& page_mask
;
2529 /* Put PRP pointer due to page boundary*/
2530 page_mask_result
= (uintptr_t)(curr_buff
+ 1) & page_mask
;
2531 if (unlikely(!page_mask_result
)) {
2532 scmd_printk(KERN_NOTICE
,
2533 scmd
, "page boundary curr_buff: 0x%p\n",
2536 *curr_buff
= cpu_to_le64(msg_dma
);
2541 *curr_buff
= cpu_to_le64(sge_addr
);
2546 sge_addr
+= nvme_pg_size
;
2547 sge_len
-= nvme_pg_size
;
2548 data_len
-= nvme_pg_size
;
2556 sg_scmd
= sg_next(sg_scmd
);
2557 sge_addr
= sg_dma_address(sg_scmd
);
2558 sge_len
= sg_dma_len(sg_scmd
);
2561 main_chain_element
->Length
=
2562 cpu_to_le32(num_prp_in_chain
* sizeof(u64
));
2567 base_is_prp_possible(struct MPT3SAS_ADAPTER
*ioc
,
2568 struct _pcie_device
*pcie_device
, struct scsi_cmnd
*scmd
, int sge_count
)
2570 u32 data_length
= 0;
2571 bool build_prp
= true;
2573 data_length
= scsi_bufflen(scmd
);
2575 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device
->device_info
))) {
2580 /* If the data length is <= 16K and the number of SGE entries is <= 2
2583 if ((data_length
<= NVME_PRP_PAGE_SIZE
*4) && (sge_count
<= 2))
2590 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
2591 * determine if the driver needs to build a native SGL. If so, that native
2592 * SGL is built in the special contiguous buffers allocated especially for
2593 * PCIe SGL creation. If the driver will not build a native SGL, return
2594 * TRUE and a normal IEEE SGL will be built. Currently this routine supports NVMe.
2596 * @ioc: per adapter object
2597 * @mpi_request: mf request pointer
2598 * @smid: system request message index
2599 * @scmd: scsi command
2600 * @pcie_device: points to the PCIe device's info
2602 * Return: 0 if native SGL was built, 1 if no SGL was built
2605 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER
*ioc
,
2606 Mpi25SCSIIORequest_t
*mpi_request
, u16 smid
, struct scsi_cmnd
*scmd
,
2607 struct _pcie_device
*pcie_device
)
2611 /* Get the SG list pointer and info. */
2612 sges_left
= scsi_dma_map(scmd
);
2616 /* Check if we need to build a native SG list. */
2617 if (!base_is_prp_possible(ioc
, pcie_device
,
2619 /* We built a native SG list, just return. */
2624 * Build native NVMe PRP.
2626 base_make_prp_nvme(ioc
, scmd
, mpi_request
,
2631 scsi_dma_unmap(scmd
);
2636 * _base_add_sg_single_ieee - add sg element for IEEE format
2637 * @paddr: virtual address for SGE
2639 * @chain_offset: number of 128 byte elements from start of segment
2640 * @length: data transfer length
2641 * @dma_addr: Physical address
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
	dma_addr_t dma_addr)
{
	Mpi25IeeeSgeChain64_t *sgel = paddr;

	sgel->Flags = flags;
	sgel->NextChainOffset = chain_offset;
	sgel->Length = cpu_to_le32(length);
	sgel->Address = cpu_to_le64(dma_addr);
}
2656 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
2657 * @ioc: per adapter object
2658 * @paddr: virtual address for SGE
2660 * Create a zero length scatter gather entry to ensure the IOC's hardware has
2661 * something to use if the target device goes brain dead and tries
2662 * to send data even when none is asked for.
2665 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER
*ioc
, void *paddr
)
2667 u8 sgl_flags
= (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT
|
2668 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR
|
2669 MPI25_IEEE_SGE_FLAGS_END_OF_LIST
);
2671 _base_add_sg_single_ieee(paddr
, sgl_flags
, 0, 0, -1);
2674 static inline int _base_scsi_dma_map(struct scsi_cmnd
*cmd
)
2677 * Some firmware versions byte-swap the REPORT ZONES command reply from
2678 * ATA-ZAC devices by directly accessing the host buffer. This does
2679 * not respect the default command DMA direction and causes IOMMU page
2680 * faults on some architectures with an IOMMU enforcing write mappings
2681 * (e.g. AMD hosts). Avoid such issues by making the REPORT ZONES buffer
2682 * mapping bi-directional.
2684 if (cmd
->cmnd
[0] == ZBC_IN
&& cmd
->cmnd
[1] == ZI_REPORT_ZONES
)
2685 cmd
->sc_data_direction
= DMA_BIDIRECTIONAL
;
2687 return scsi_dma_map(cmd
);
2691 * _base_build_sg_scmd - main sg creation routine
2692 * pcie_device is unused here!
2693 * @ioc: per adapter object
2694 * @scmd: scsi command
2695 * @smid: system request message index
2696 * @unused: unused pcie_device pointer
2699 * The main routine that builds scatter gather table from a given
2700 * scsi request sent via the .queuecommand main handler.
2702 * Return: 0 success, anything else error
2705 _base_build_sg_scmd(struct MPT3SAS_ADAPTER
*ioc
,
2706 struct scsi_cmnd
*scmd
, u16 smid
, struct _pcie_device
*unused
)
2708 Mpi2SCSIIORequest_t
*mpi_request
;
2709 dma_addr_t chain_dma
;
2710 struct scatterlist
*sg_scmd
;
2711 void *sg_local
, *chain
;
2716 u32 sges_in_segment
;
2718 u32 sgl_flags_last_element
;
2719 u32 sgl_flags_end_buffer
;
2720 struct chain_tracker
*chain_req
;
2722 mpi_request
= mpt3sas_base_get_msg_frame(ioc
, smid
);
2724 /* init scatter gather flags */
2725 sgl_flags
= MPI2_SGE_FLAGS_SIMPLE_ELEMENT
;
2726 if (scmd
->sc_data_direction
== DMA_TO_DEVICE
)
2727 sgl_flags
|= MPI2_SGE_FLAGS_HOST_TO_IOC
;
2728 sgl_flags_last_element
= (sgl_flags
| MPI2_SGE_FLAGS_LAST_ELEMENT
)
2729 << MPI2_SGE_FLAGS_SHIFT
;
2730 sgl_flags_end_buffer
= (sgl_flags
| MPI2_SGE_FLAGS_LAST_ELEMENT
|
2731 MPI2_SGE_FLAGS_END_OF_BUFFER
| MPI2_SGE_FLAGS_END_OF_LIST
)
2732 << MPI2_SGE_FLAGS_SHIFT
;
2733 sgl_flags
= sgl_flags
<< MPI2_SGE_FLAGS_SHIFT
;
2735 sg_scmd
= scsi_sglist(scmd
);
2736 sges_left
= _base_scsi_dma_map(scmd
);
2740 sg_local
= &mpi_request
->SGL
;
2741 sges_in_segment
= ioc
->max_sges_in_main_message
;
2742 if (sges_left
<= sges_in_segment
)
2743 goto fill_in_last_segment
;
2745 mpi_request
->ChainOffset
= (offsetof(Mpi2SCSIIORequest_t
, SGL
) +
2746 (sges_in_segment
* ioc
->sge_size
))/4;
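	/* ChainOffset above is expressed in 32-bit words from the start of
	 * the request frame, hence the divide by four.
	 */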
2748 /* fill in main message segment when there is a chain following */
2749 while (sges_in_segment
) {
2750 if (sges_in_segment
== 1)
2751 ioc
->base_add_sg_single(sg_local
,
2752 sgl_flags_last_element
| sg_dma_len(sg_scmd
),
2753 sg_dma_address(sg_scmd
));
2755 ioc
->base_add_sg_single(sg_local
, sgl_flags
|
2756 sg_dma_len(sg_scmd
), sg_dma_address(sg_scmd
));
2757 sg_scmd
= sg_next(sg_scmd
);
2758 sg_local
+= ioc
->sge_size
;
2763 /* initializing the chain flags and pointers */
2764 chain_flags
= MPI2_SGE_FLAGS_CHAIN_ELEMENT
<< MPI2_SGE_FLAGS_SHIFT
;
2765 chain_req
= _base_get_chain_buffer_tracker(ioc
, scmd
);
2768 chain
= chain_req
->chain_buffer
;
2769 chain_dma
= chain_req
->chain_buffer_dma
;
2771 sges_in_segment
= (sges_left
<=
2772 ioc
->max_sges_in_chain_message
) ? sges_left
:
2773 ioc
->max_sges_in_chain_message
;
2774 chain_offset
= (sges_left
== sges_in_segment
) ?
2775 0 : (sges_in_segment
* ioc
->sge_size
)/4;
2776 chain_length
= sges_in_segment
* ioc
->sge_size
;
2778 chain_offset
= chain_offset
<<
2779 MPI2_SGE_CHAIN_OFFSET_SHIFT
;
2780 chain_length
+= ioc
->sge_size
;
2782 ioc
->base_add_sg_single(sg_local
, chain_flags
| chain_offset
|
2783 chain_length
, chain_dma
);
2786 goto fill_in_last_segment
;
2788 /* fill in chain segments */
2789 while (sges_in_segment
) {
2790 if (sges_in_segment
== 1)
2791 ioc
->base_add_sg_single(sg_local
,
2792 sgl_flags_last_element
|
2793 sg_dma_len(sg_scmd
),
2794 sg_dma_address(sg_scmd
));
2796 ioc
->base_add_sg_single(sg_local
, sgl_flags
|
2797 sg_dma_len(sg_scmd
),
2798 sg_dma_address(sg_scmd
));
2799 sg_scmd
= sg_next(sg_scmd
);
2800 sg_local
+= ioc
->sge_size
;
2805 chain_req
= _base_get_chain_buffer_tracker(ioc
, scmd
);
2808 chain
= chain_req
->chain_buffer
;
2809 chain_dma
= chain_req
->chain_buffer_dma
;
2813 fill_in_last_segment
:
2815 /* fill the last segment */
2818 ioc
->base_add_sg_single(sg_local
, sgl_flags_end_buffer
|
2819 sg_dma_len(sg_scmd
), sg_dma_address(sg_scmd
));
2821 ioc
->base_add_sg_single(sg_local
, sgl_flags
|
2822 sg_dma_len(sg_scmd
), sg_dma_address(sg_scmd
));
2823 sg_scmd
= sg_next(sg_scmd
);
2824 sg_local
+= ioc
->sge_size
;
2832 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2833 * @ioc: per adapter object
2834 * @scmd: scsi command
2835 * @smid: system request message index
2836 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2837 * constructed on need.
2840 * The main routine that builds scatter gather table from a given
2841 * scsi request sent via the .queuecommand main handler.
2843 * Return: 0 success, anything else error
2846 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER
*ioc
,
2847 struct scsi_cmnd
*scmd
, u16 smid
, struct _pcie_device
*pcie_device
)
2849 Mpi25SCSIIORequest_t
*mpi_request
;
2850 dma_addr_t chain_dma
;
2851 struct scatterlist
*sg_scmd
;
2852 void *sg_local
, *chain
;
2856 u32 sges_in_segment
;
2857 u8 simple_sgl_flags
;
2858 u8 simple_sgl_flags_last
;
2860 struct chain_tracker
*chain_req
;
2862 mpi_request
= mpt3sas_base_get_msg_frame(ioc
, smid
);
2864 /* init scatter gather flags */
2865 simple_sgl_flags
= MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT
|
2866 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR
;
2867 simple_sgl_flags_last
= simple_sgl_flags
|
2868 MPI25_IEEE_SGE_FLAGS_END_OF_LIST
;
2869 chain_sgl_flags
= MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT
|
2870 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR
;
2872 /* Check if we need to build a native SG list. */
2873 if ((pcie_device
) && (_base_check_pcie_native_sgl(ioc
, mpi_request
,
2874 smid
, scmd
, pcie_device
) == 0)) {
2875 /* We built a native SG list, just return. */
2879 sg_scmd
= scsi_sglist(scmd
);
2880 sges_left
= _base_scsi_dma_map(scmd
);
2884 sg_local
= &mpi_request
->SGL
;
2885 sges_in_segment
= (ioc
->request_sz
-
2886 offsetof(Mpi25SCSIIORequest_t
, SGL
))/ioc
->sge_size_ieee
;
2887 if (sges_left
<= sges_in_segment
)
2888 goto fill_in_last_segment
;
2890 mpi_request
->ChainOffset
= (sges_in_segment
- 1 /* chain element */) +
2891 (offsetof(Mpi25SCSIIORequest_t
, SGL
)/ioc
->sge_size_ieee
);
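	/* For IEEE SGLs the ChainOffset above is counted in units of one
	 * IEEE SGE (sge_size_ieee bytes) rather than 32-bit words, which is
	 * why the byte offset is divided by sge_size_ieee here.
	 */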
2893 /* fill in main message segment when there is a chain following */
2894 while (sges_in_segment
> 1) {
2895 _base_add_sg_single_ieee(sg_local
, simple_sgl_flags
, 0,
2896 sg_dma_len(sg_scmd
), sg_dma_address(sg_scmd
));
2897 sg_scmd
= sg_next(sg_scmd
);
2898 sg_local
+= ioc
->sge_size_ieee
;
2903 /* initializing the pointers */
2904 chain_req
= _base_get_chain_buffer_tracker(ioc
, scmd
);
2907 chain
= chain_req
->chain_buffer
;
2908 chain_dma
= chain_req
->chain_buffer_dma
;
2910 sges_in_segment
= (sges_left
<=
2911 ioc
->max_sges_in_chain_message
) ? sges_left
:
2912 ioc
->max_sges_in_chain_message
;
2913 chain_offset
= (sges_left
== sges_in_segment
) ?
2914 0 : sges_in_segment
;
2915 chain_length
= sges_in_segment
* ioc
->sge_size_ieee
;
2917 chain_length
+= ioc
->sge_size_ieee
;
2918 _base_add_sg_single_ieee(sg_local
, chain_sgl_flags
,
2919 chain_offset
, chain_length
, chain_dma
);
2923 goto fill_in_last_segment
;
2925 /* fill in chain segments */
2926 while (sges_in_segment
) {
2927 _base_add_sg_single_ieee(sg_local
, simple_sgl_flags
, 0,
2928 sg_dma_len(sg_scmd
), sg_dma_address(sg_scmd
));
2929 sg_scmd
= sg_next(sg_scmd
);
2930 sg_local
+= ioc
->sge_size_ieee
;
2935 chain_req
= _base_get_chain_buffer_tracker(ioc
, scmd
);
2938 chain
= chain_req
->chain_buffer
;
2939 chain_dma
= chain_req
->chain_buffer_dma
;
2943 fill_in_last_segment
:
2945 /* fill the last segment */
2946 while (sges_left
> 0) {
2948 _base_add_sg_single_ieee(sg_local
,
2949 simple_sgl_flags_last
, 0, sg_dma_len(sg_scmd
),
2950 sg_dma_address(sg_scmd
));
2952 _base_add_sg_single_ieee(sg_local
, simple_sgl_flags
, 0,
2953 sg_dma_len(sg_scmd
), sg_dma_address(sg_scmd
));
2954 sg_scmd
= sg_next(sg_scmd
);
2955 sg_local
+= ioc
->sge_size_ieee
;
2963 * _base_build_sg_ieee - build generic sg for IEEE format
2964 * @ioc: per adapter object
2965 * @psge: virtual address for SGE
2966 * @data_out_dma: physical address for WRITES
2967 * @data_out_sz: data xfer size for WRITES
2968 * @data_in_dma: physical address for READS
2969 * @data_in_sz: data xfer size for READS
2972 _base_build_sg_ieee(struct MPT3SAS_ADAPTER
*ioc
, void *psge
,
2973 dma_addr_t data_out_dma
, size_t data_out_sz
, dma_addr_t data_in_dma
,
2978 if (!data_out_sz
&& !data_in_sz
) {
2979 _base_build_zero_len_sge_ieee(ioc
, psge
);
2983 if (data_out_sz
&& data_in_sz
) {
2984 /* WRITE sgel first */
2985 sgl_flags
= MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT
|
2986 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR
;
2987 _base_add_sg_single_ieee(psge
, sgl_flags
, 0, data_out_sz
,
2991 psge
+= ioc
->sge_size_ieee
;
2993 /* READ sgel last */
2994 sgl_flags
|= MPI25_IEEE_SGE_FLAGS_END_OF_LIST
;
2995 _base_add_sg_single_ieee(psge
, sgl_flags
, 0, data_in_sz
,
2997 } else if (data_out_sz
) /* WRITE */ {
2998 sgl_flags
= MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT
|
2999 MPI25_IEEE_SGE_FLAGS_END_OF_LIST
|
3000 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR
;
3001 _base_add_sg_single_ieee(psge
, sgl_flags
, 0, data_out_sz
,
3003 } else if (data_in_sz
) /* READ */ {
3004 sgl_flags
= MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT
|
3005 MPI25_IEEE_SGE_FLAGS_END_OF_LIST
|
3006 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR
;
3007 _base_add_sg_single_ieee(psge
, sgl_flags
, 0, data_in_sz
,
3012 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
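/* e.g. with 4 KB pages (PAGE_SHIFT == 12) this shifts a page count left by
 * two bits, i.e. multiplies it by four to express the total in kilobytes.
 */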
3015 * _base_config_dma_addressing - set dma addressing
3016 * @ioc: per adapter object
3017 * @pdev: PCI device struct
3019 * Return: 0 for success, non-zero for failure.
3022 _base_config_dma_addressing(struct MPT3SAS_ADAPTER
*ioc
, struct pci_dev
*pdev
)
3025 u64 coherent_dma_mask
, dma_mask
;
3027 if (ioc
->is_mcpu_endpoint
|| sizeof(dma_addr_t
) == 4) {
3029 coherent_dma_mask
= dma_mask
= DMA_BIT_MASK(32);
3030 /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
3031 } else if (ioc
->hba_mpi_version_belonged
> MPI2_VERSION
) {
3033 coherent_dma_mask
= dma_mask
= DMA_BIT_MASK(63);
3036 coherent_dma_mask
= dma_mask
= DMA_BIT_MASK(64);
3039 if (ioc
->use_32bit_dma
)
3040 coherent_dma_mask
= DMA_BIT_MASK(32);
3042 if (dma_set_mask(&pdev
->dev
, dma_mask
) ||
3043 dma_set_coherent_mask(&pdev
->dev
, coherent_dma_mask
))
3046 if (ioc
->dma_mask
> 32) {
3047 ioc
->base_add_sg_single
= &_base_add_sg_single_64
;
3048 ioc
->sge_size
= sizeof(Mpi2SGESimple64_t
);
3050 ioc
->base_add_sg_single
= &_base_add_sg_single_32
;
3051 ioc
->sge_size
= sizeof(Mpi2SGESimple32_t
);
3055 ioc_info(ioc
, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
3056 ioc
->dma_mask
, convert_to_kb(s
.totalram
));
3062 * _base_check_enable_msix - check MSI-X capability.
3063 * @ioc: per adapter object
3065 * Check to see if the card is capable of MSI-X, and set the number
3066 * of available MSI-X vectors.
3069 _base_check_enable_msix(struct MPT3SAS_ADAPTER
*ioc
)
3072 u16 message_control
;
3074 /* Check whether the controller is a SAS2008 B0 controller;
3075 * if so, use IO-APIC instead of MSI-X.
3077 if (ioc
->pdev
->device
== MPI2_MFGPAGE_DEVID_SAS2008
&&
3078 ioc
->pdev
->revision
== SAS2_PCI_DEVICE_B0_REVISION
) {
3082 base
= pci_find_capability(ioc
->pdev
, PCI_CAP_ID_MSIX
);
3084 dfailprintk(ioc
, ioc_info(ioc
, "msix not supported\n"));
3088 /* get msix vector count */
3089 /* NUMA_IO not supported for older controllers */
3090 if (ioc
->pdev
->device
== MPI2_MFGPAGE_DEVID_SAS2004
||
3091 ioc
->pdev
->device
== MPI2_MFGPAGE_DEVID_SAS2008
||
3092 ioc
->pdev
->device
== MPI2_MFGPAGE_DEVID_SAS2108_1
||
3093 ioc
->pdev
->device
== MPI2_MFGPAGE_DEVID_SAS2108_2
||
3094 ioc
->pdev
->device
== MPI2_MFGPAGE_DEVID_SAS2108_3
||
3095 ioc
->pdev
->device
== MPI2_MFGPAGE_DEVID_SAS2116_1
||
3096 ioc
->pdev
->device
== MPI2_MFGPAGE_DEVID_SAS2116_2
)
3097 ioc
->msix_vector_count
= 1;
3099 pci_read_config_word(ioc
->pdev
, base
+ 2, &message_control
);
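	/* The table-size field in the MSI-X Message Control word encodes
	 * the number of vectors minus one, hence the "+ 1" below
	 * (assumption based on the PCI MSI-X capability layout).
	 */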
3100 ioc
->msix_vector_count
= (message_control
& 0x3FF) + 1;
3102 dinitprintk(ioc
, ioc_info(ioc
, "msix is supported, vector_count(%d)\n",
3103 ioc
->msix_vector_count
));
3108 * mpt3sas_base_free_irq - free irq
3109 * @ioc: per adapter object
3111 * Freeing respective reply_queue from the list.
3114 mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER
*ioc
)
3117 struct adapter_reply_queue
*reply_q
, *next
;
3119 if (list_empty(&ioc
->reply_queue_list
))
3122 list_for_each_entry_safe(reply_q
, next
, &ioc
->reply_queue_list
, list
) {
3123 list_del(&reply_q
->list
);
3124 if (reply_q
->is_iouring_poll_q
) {
3129 if (ioc
->smp_affinity_enable
) {
3130 irq
= pci_irq_vector(ioc
->pdev
, reply_q
->msix_index
);
3131 irq_update_affinity_hint(irq
, NULL
);
3133 free_irq(pci_irq_vector(ioc
->pdev
, reply_q
->msix_index
),
3140 * _base_request_irq - request irq
3141 * @ioc: per adapter object
3142 * @index: msix index into vector table
3144 * Inserting respective reply_queue into the list.
3147 _base_request_irq(struct MPT3SAS_ADAPTER
*ioc
, u8 index
)
3149 struct pci_dev
*pdev
= ioc
->pdev
;
3150 struct adapter_reply_queue
*reply_q
;
3153 reply_q
= kzalloc(sizeof(struct adapter_reply_queue
), GFP_KERNEL
);
3155 ioc_err(ioc
, "unable to allocate memory %zu!\n",
3156 sizeof(struct adapter_reply_queue
));
3160 reply_q
->msix_index
= index
;
3162 atomic_set(&reply_q
->busy
, 0);
3164 if (index
>= ioc
->iopoll_q_start_index
) {
3165 qid
= index
- ioc
->iopoll_q_start_index
;
3166 snprintf(reply_q
->name
, MPT_NAME_LENGTH
, "%s%d-mq-poll%d",
3167 ioc
->driver_name
, ioc
->id
, qid
);
3168 reply_q
->is_iouring_poll_q
= 1;
3169 ioc
->io_uring_poll_queues
[qid
].reply_q
= reply_q
;
3174 if (ioc
->msix_enable
)
3175 snprintf(reply_q
->name
, MPT_NAME_LENGTH
, "%s%d-msix%d",
3176 ioc
->driver_name
, ioc
->id
, index
);
3178 snprintf(reply_q
->name
, MPT_NAME_LENGTH
, "%s%d",
3179 ioc
->driver_name
, ioc
->id
);
3180 r
= request_irq(pci_irq_vector(pdev
, index
), _base_interrupt
,
3181 IRQF_SHARED
, reply_q
->name
, reply_q
);
3183 pr_err("%s: unable to allocate interrupt %d!\n",
3184 reply_q
->name
, pci_irq_vector(pdev
, index
));
3189 INIT_LIST_HEAD(&reply_q
->list
);
3190 list_add_tail(&reply_q
->list
, &ioc
->reply_queue_list
);
3195 * _base_assign_reply_queues - assigning msix index for each cpu
3196 * @ioc: per adapter object
3198 * The end user would need to set the affinity via /proc/irq/#/smp_affinity
3201 _base_assign_reply_queues(struct MPT3SAS_ADAPTER
*ioc
)
3203 unsigned int cpu
, nr_cpus
, nr_msix
, index
= 0, irq
;
3204 struct adapter_reply_queue
*reply_q
;
3205 int iopoll_q_count
= ioc
->reply_queue_count
-
3206 ioc
->iopoll_q_start_index
;
3207 const struct cpumask
*mask
;
3209 if (!_base_is_controller_msix_enabled(ioc
))
3212 if (ioc
->msix_load_balance
)
3215 memset(ioc
->cpu_msix_table
, 0, ioc
->cpu_msix_table_sz
);
3217 nr_cpus
= num_online_cpus();
3218 nr_msix
= ioc
->reply_queue_count
= min(ioc
->reply_queue_count
,
3219 ioc
->facts
.MaxMSIxVectors
);
3223 if (ioc
->smp_affinity_enable
) {
3226 * set irq affinity to local numa node for those irqs
3227 * corresponding to high iops queues.
3229 if (ioc
->high_iops_queues
) {
3230 mask
= cpumask_of_node(dev_to_node(&ioc
->pdev
->dev
));
3231 for (index
= 0; index
< ioc
->high_iops_queues
;
3233 irq
= pci_irq_vector(ioc
->pdev
, index
);
3234 irq_set_affinity_and_hint(irq
, mask
);
3238 list_for_each_entry(reply_q
, &ioc
->reply_queue_list
, list
) {
3239 const cpumask_t
*mask
;
3241 if (reply_q
->msix_index
< ioc
->high_iops_queues
||
3242 reply_q
->msix_index
>= ioc
->iopoll_q_start_index
)
3245 mask
= pci_irq_get_affinity(ioc
->pdev
,
3246 reply_q
->msix_index
);
3248 ioc_warn(ioc
, "no affinity for msi %x\n",
3249 reply_q
->msix_index
);
3253 for_each_cpu_and(cpu
, mask
, cpu_online_mask
) {
3254 if (cpu
>= ioc
->cpu_msix_table_sz
)
3256 ioc
->cpu_msix_table
[cpu
] = reply_q
->msix_index
;
3263 cpu
= cpumask_first(cpu_online_mask
);
3264 nr_msix
-= (ioc
->high_iops_queues
- iopoll_q_count
);
3267 list_for_each_entry(reply_q
, &ioc
->reply_queue_list
, list
) {
3268 unsigned int i
, group
= nr_cpus
/ nr_msix
;
3270 if (reply_q
->msix_index
< ioc
->high_iops_queues
||
3271 reply_q
->msix_index
>= ioc
->iopoll_q_start_index
)
3277 if (index
< nr_cpus
% nr_msix
)
3280 for (i
= 0 ; i
< group
; i
++) {
3281 ioc
->cpu_msix_table
[cpu
] = reply_q
->msix_index
;
3282 cpu
= cpumask_next(cpu
, cpu_online_mask
);
3289 * _base_check_and_enable_high_iops_queues - enable high iops mode
3290 * @ioc: per adapter object
3291 * @hba_msix_vector_count: msix vectors supported by HBA
3293 * Enable high iops queues only if
3294 * - HBA is a SEA/AERO controller and
3295 * - the MSI-X vector count supported by the HBA is 128 and
3296 * - total CPU count in the system >=16 and
3297 * - loaded driver with default max_msix_vectors module parameter and
3298 * - system booted in non kdump mode
3303 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER
*ioc
,
3304 int hba_msix_vector_count
)
3309 * Disable high iops queues if io uring poll queues are enabled.
3311 if (perf_mode
== MPT_PERF_MODE_IOPS
||
3312 perf_mode
== MPT_PERF_MODE_LATENCY
||
3313 ioc
->io_uring_poll_queues
) {
3314 ioc
->high_iops_queues
= 0;
3318 if (perf_mode
== MPT_PERF_MODE_DEFAULT
) {
3320 pcie_capability_read_word(ioc
->pdev
, PCI_EXP_LNKSTA
, &lnksta
);
3321 speed
= lnksta
& PCI_EXP_LNKSTA_CLS
;
3324 ioc
->high_iops_queues
= 0;
3329 if (!reset_devices
&& ioc
->is_aero_ioc
&&
3330 hba_msix_vector_count
== MPT3SAS_GEN35_MAX_MSIX_QUEUES
&&
3331 num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES
&&
3332 max_msix_vectors
== -1)
3333 ioc
->high_iops_queues
= MPT3SAS_HIGH_IOPS_REPLY_QUEUES
;
3335 ioc
->high_iops_queues
= 0;
3339 * mpt3sas_base_disable_msix - disables msix
3340 * @ioc: per adapter object
3344 mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER
*ioc
)
3346 if (!ioc
->msix_enable
)
3348 pci_free_irq_vectors(ioc
->pdev
);
3349 ioc
->msix_enable
= 0;
3350 kfree(ioc
->io_uring_poll_queues
);
3354 * _base_alloc_irq_vectors - allocate msix vectors
3355 * @ioc: per adapter object
3359 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER
*ioc
)
3361 int i
, irq_flags
= PCI_IRQ_MSIX
;
3362 struct irq_affinity desc
= { .pre_vectors
= ioc
->high_iops_queues
};
3363 struct irq_affinity
*descp
= &desc
;
3365 * Don't allocate msix vectors for poll_queues.
3366 * msix_vectors is always within a range of FW supported reply queue.
3368 int nr_msix_vectors
= ioc
->iopoll_q_start_index
;
3371 if (ioc
->smp_affinity_enable
)
3372 irq_flags
|= PCI_IRQ_AFFINITY
| PCI_IRQ_ALL_TYPES
;
3376 ioc_info(ioc
, " %d %d %d\n", ioc
->high_iops_queues
,
3377 ioc
->reply_queue_count
, nr_msix_vectors
);
3379 i
= pci_alloc_irq_vectors_affinity(ioc
->pdev
,
3380 ioc
->high_iops_queues
,
3381 nr_msix_vectors
, irq_flags
, descp
);
3387 * _base_enable_msix - enables msix, falls back to io_apic on failure
3388 * @ioc: per adapter object
3392 _base_enable_msix(struct MPT3SAS_ADAPTER
*ioc
)
3395 int i
, local_max_msix_vectors
;
3397 int iopoll_q_count
= 0;
3399 ioc
->msix_load_balance
= false;
3401 if (msix_disable
== -1 || msix_disable
== 0)
3407 if (_base_check_enable_msix(ioc
) != 0)
3410 ioc_info(ioc
, "MSI-X vectors supported: %d\n", ioc
->msix_vector_count
);
3411 pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3412 ioc
->cpu_count
, max_msix_vectors
);
3414 ioc
->reply_queue_count
=
3415 min_t(int, ioc
->cpu_count
, ioc
->msix_vector_count
);
3417 if (!ioc
->rdpq_array_enable
&& max_msix_vectors
== -1)
3418 local_max_msix_vectors
= (reset_devices
) ? 1 : 8;
3420 local_max_msix_vectors
= max_msix_vectors
;
3422 if (local_max_msix_vectors
== 0)
3426 * Enable msix_load_balance only if combined reply queue mode is
3427 * disabled on SAS3 & above generation HBA devices.
3429 if (!ioc
->combined_reply_queue
&&
3430 ioc
->hba_mpi_version_belonged
!= MPI2_VERSION
) {
3432 "combined ReplyQueue is off, Enabling msix load balance\n");
3433 ioc
->msix_load_balance
= true;
3437 * smp affinity setting is not needed when msix load balance is enabled.
3440 if (ioc
->msix_load_balance
)
3441 ioc
->smp_affinity_enable
= 0;
3443 if (!ioc
->smp_affinity_enable
|| ioc
->reply_queue_count
<= 1)
3444 ioc
->shost
->host_tagset
= 0;
3447 * Enable io uring poll queues only if host_tagset is enabled.
3449 if (ioc
->shost
->host_tagset
)
3450 iopoll_q_count
= poll_queues
;
3452 if (iopoll_q_count
) {
3453 ioc
->io_uring_poll_queues
= kcalloc(iopoll_q_count
,
3454 sizeof(struct io_uring_poll_queue
), GFP_KERNEL
);
3455 if (!ioc
->io_uring_poll_queues
)
3459 if (ioc
->is_aero_ioc
)
3460 _base_check_and_enable_high_iops_queues(ioc
,
3461 ioc
->msix_vector_count
);
3464 * Add high iops queues count to reply queue count if high iops queues
3467 ioc
->reply_queue_count
= min_t(int,
3468 ioc
->reply_queue_count
+ ioc
->high_iops_queues
,
3469 ioc
->msix_vector_count
);
3472 * Adjust the reply queue count in case the reply queue count
3473 * exceeds the user-provided MSI-X vector count.
3475 if (local_max_msix_vectors
> 0)
3476 ioc
->reply_queue_count
= min_t(int, local_max_msix_vectors
,
3477 ioc
->reply_queue_count
);
3479 * Add io uring poll queues count to reply queues count
3480 * if io uring is enabled in driver.
3482 if (iopoll_q_count
) {
3483 if (ioc
->reply_queue_count
< (iopoll_q_count
+ MPT3_MIN_IRQS
))
3485 ioc
->reply_queue_count
= min_t(int,
3486 ioc
->reply_queue_count
+ iopoll_q_count
,
3487 ioc
->msix_vector_count
);
3491 * Starting index of io uring poll queues in reply queue list.
3493 ioc
->iopoll_q_start_index
=
3494 ioc
->reply_queue_count
- iopoll_q_count
;
3496 r
= _base_alloc_irq_vectors(ioc
);
3498 ioc_info(ioc
, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r
);
3503 * Adjust the reply queue count if the allocated
3504 * MSI-X vectors are less than the requested number.
3507 if (r
< ioc
->iopoll_q_start_index
) {
3508 ioc
->reply_queue_count
= r
+ iopoll_q_count
;
3509 ioc
->iopoll_q_start_index
=
3510 ioc
->reply_queue_count
- iopoll_q_count
;
3513 ioc
->msix_enable
= 1;
3514 for (i
= 0; i
< ioc
->reply_queue_count
; i
++) {
3515 r
= _base_request_irq(ioc
, i
);
3517 mpt3sas_base_free_irq(ioc
);
3518 mpt3sas_base_disable_msix(ioc
);
3523 ioc_info(ioc
, "High IOPs queues : %s\n",
3524 ioc
->high_iops_queues
? "enabled" : "disabled");
3528 /* fall back to io_apic interrupt routing */
3530 ioc
->high_iops_queues
= 0;
3531 ioc_info(ioc
, "High IOPs queues : disabled\n");
3532 ioc
->reply_queue_count
= 1;
3533 ioc
->iopoll_q_start_index
= ioc
->reply_queue_count
- 0;
3534 r
= pci_alloc_irq_vectors(ioc
->pdev
, 1, 1, PCI_IRQ_INTX
);
3537 ioc_info(ioc
, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3540 r
= _base_request_irq(ioc
, 0);
3546 * mpt3sas_base_unmap_resources - free controller resources
3547 * @ioc: per adapter object
3550 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER
*ioc
)
3552 struct pci_dev
*pdev
= ioc
->pdev
;
3554 dexitprintk(ioc
, ioc_info(ioc
, "%s\n", __func__
));
3556 mpt3sas_base_free_irq(ioc
);
3557 mpt3sas_base_disable_msix(ioc
);
3559 kfree(ioc
->replyPostRegisterIndex
);
3560 ioc
->replyPostRegisterIndex
= NULL
;
3563 if (ioc
->chip_phys
) {
3568 if (pci_is_enabled(pdev
)) {
3569 pci_release_selected_regions(ioc
->pdev
, ioc
->bars
);
3570 pci_disable_device(pdev
);
3575 _base_diag_reset(struct MPT3SAS_ADAPTER
*ioc
);
3578 * mpt3sas_base_check_for_fault_and_issue_reset - check if IOC is in fault state
3579 * and if it is in fault state then issue diag reset.
3580 * @ioc: per adapter object
3582 * Return: 0 for success, non-zero for failure.
3585 mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER
*ioc
)
3590 dinitprintk(ioc
, pr_info("%s\n", __func__
));
3591 if (ioc
->pci_error_recovery
)
3593 ioc_state
= mpt3sas_base_get_iocstate(ioc
, 0);
3594 dhsprintk(ioc
, pr_info("%s: ioc_state(0x%08x)\n", __func__
, ioc_state
));
3596 if ((ioc_state
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_FAULT
) {
3597 mpt3sas_print_fault_code(ioc
, ioc_state
&
3598 MPI2_DOORBELL_DATA_MASK
);
3599 mpt3sas_base_mask_interrupts(ioc
);
3600 rc
= _base_diag_reset(ioc
);
3601 } else if ((ioc_state
& MPI2_IOC_STATE_MASK
) ==
3602 MPI2_IOC_STATE_COREDUMP
) {
3603 mpt3sas_print_coredump_info(ioc
, ioc_state
&
3604 MPI2_DOORBELL_DATA_MASK
);
3605 mpt3sas_base_wait_for_coredump_completion(ioc
, __func__
);
3606 mpt3sas_base_mask_interrupts(ioc
);
3607 rc
= _base_diag_reset(ioc
);
3614 * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3615 * @ioc: per adapter object
3617 * Return: 0 for success, non-zero for failure.
3620 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER
*ioc
)
3622 struct pci_dev
*pdev
= ioc
->pdev
;
3627 phys_addr_t chip_phys
= 0;
3628 struct adapter_reply_queue
*reply_q
;
3629 int iopoll_q_count
= 0;
3631 dinitprintk(ioc
, ioc_info(ioc
, "%s\n", __func__
));
3633 ioc
->bars
= pci_select_bars(pdev
, IORESOURCE_MEM
);
3634 if (pci_enable_device_mem(pdev
)) {
3635 ioc_warn(ioc
, "pci_enable_device_mem: failed\n");
3641 if (pci_request_selected_regions(pdev
, ioc
->bars
,
3642 ioc
->driver_name
)) {
3643 ioc_warn(ioc
, "pci_request_selected_regions: failed\n");
3649 pci_set_master(pdev
);
3652 if (_base_config_dma_addressing(ioc
, pdev
) != 0) {
3653 ioc_warn(ioc
, "no suitable DMA mask for %s\n", pci_name(pdev
));
3658 for (i
= 0, memap_sz
= 0, pio_sz
= 0; (i
< DEVICE_COUNT_RESOURCE
) &&
3659 (!memap_sz
|| !pio_sz
); i
++) {
3660 if (pci_resource_flags(pdev
, i
) & IORESOURCE_IO
) {
3663 pio_chip
= (u64
)pci_resource_start(pdev
, i
);
3664 pio_sz
= pci_resource_len(pdev
, i
);
3665 } else if (pci_resource_flags(pdev
, i
) & IORESOURCE_MEM
) {
3668 ioc
->chip_phys
= pci_resource_start(pdev
, i
);
3669 chip_phys
= ioc
->chip_phys
;
3670 memap_sz
= pci_resource_len(pdev
, i
);
3671 ioc
->chip
= ioremap(ioc
->chip_phys
, memap_sz
);
3675 if (ioc
->chip
== NULL
) {
3677 "unable to map adapter memory! or resource not found\n");
3682 mpt3sas_base_mask_interrupts(ioc
);
3684 r
= _base_get_ioc_facts(ioc
);
3686 rc
= mpt3sas_base_check_for_fault_and_issue_reset(ioc
);
3687 if (rc
|| (_base_get_ioc_facts(ioc
)))
3691 if (!ioc
->rdpq_array_enable_assigned
) {
3692 ioc
->rdpq_array_enable
= ioc
->rdpq_array_capable
;
3693 ioc
->rdpq_array_enable_assigned
= 1;
3696 r
= _base_enable_msix(ioc
);
3700 iopoll_q_count
= ioc
->reply_queue_count
- ioc
->iopoll_q_start_index
;
3701 for (i
= 0; i
< iopoll_q_count
; i
++) {
3702 atomic_set(&ioc
->io_uring_poll_queues
[i
].busy
, 0);
3703 atomic_set(&ioc
->io_uring_poll_queues
[i
].pause
, 0);
3706 if (!ioc
->is_driver_loading
)
3707 _base_init_irqpolls(ioc
);
3708 /* Use the Combined reply queue feature only for SAS3 C0 & higher
3709 * revision HBAs and also only when reply queue count is greater than 8
3711 if (ioc
->combined_reply_queue
) {
3712 /* Determine the Supplemental Reply Post Host Index Registers'
3713 * addresses. The Supplemental Reply Post Host Index Registers
3714 * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and
3715 * each register is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET
3716 * bytes from the previous one.
3718 ioc
->replyPostRegisterIndex
= kcalloc(
3719 ioc
->combined_reply_index_count
,
3720 sizeof(resource_size_t
*), GFP_KERNEL
);
3721 if (!ioc
->replyPostRegisterIndex
) {
3723 "allocation for replyPostRegisterIndex failed!\n");
3728 for (i
= 0; i
< ioc
->combined_reply_index_count
; i
++) {
3729 ioc
->replyPostRegisterIndex
[i
] =
3730 (resource_size_t __iomem
*)
3731 ((u8 __force
*)&ioc
->chip
->Doorbell
+
3732 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET
+
3733 (i
* MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET
));
3737 if (ioc
->is_warpdrive
) {
3738 ioc
->reply_post_host_index
[0] = (resource_size_t __iomem
*)
3739 &ioc
->chip
->ReplyPostHostIndex
;
3741 for (i
= 1; i
< ioc
->cpu_msix_table_sz
; i
++)
3742 ioc
->reply_post_host_index
[i
] =
3743 (resource_size_t __iomem
*)
3744 ((u8 __iomem
*)&ioc
->chip
->Doorbell
+ (0x4000 + ((i
- 1)
3748 list_for_each_entry(reply_q
, &ioc
->reply_queue_list
, list
) {
3749 if (reply_q
->msix_index
>= ioc
->iopoll_q_start_index
) {
3750 pr_info("%s: enabled: index: %d\n",
3751 reply_q
->name
, reply_q
->msix_index
);
3755 pr_info("%s: %s enabled: IRQ %d\n",
3757 ioc
->msix_enable
? "PCI-MSI-X" : "IO-APIC",
3758 pci_irq_vector(ioc
->pdev
, reply_q
->msix_index
));
3761 ioc_info(ioc
, "iomem(%pap), mapped(0x%p), size(%d)\n",
3762 &chip_phys
, ioc
->chip
, memap_sz
);
3763 ioc_info(ioc
, "ioport(0x%016llx), size(%d)\n",
3764 (unsigned long long)pio_chip
, pio_sz
);
3766 /* Save PCI configuration state for recovery from PCI AER/EEH errors */
3767 pci_save_state(pdev
);
3771 mpt3sas_base_unmap_resources(ioc
);
3776 * mpt3sas_base_get_msg_frame - obtain request mf pointer
3777 * @ioc: per adapter object
3778 * @smid: system request message index(smid zero is invalid)
3780 * Return: virt pointer to message frame.
mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->request + (smid * ioc->request_sz));
}
3789 * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3790 * @ioc: per adapter object
3791 * @smid: system request message index
3793 * Return: virt pointer to sense buffer.
mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
}
3802 * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3803 * @ioc: per adapter object
3804 * @smid: system request message index
3806 * Return: phys pointer to the low 32bit address of the sense buffer.
mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
	    SCSI_SENSE_BUFFERSIZE));
}
3816 * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3817 * @ioc: per adapter object
3818 * @smid: system request message index
3820 * Return: virt pointer to a PCIe SGL.
mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
}
3829 * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3830 * @ioc: per adapter object
3831 * @smid: system request message index
3833 * Return: phys pointer to the address of the PCIe buffer.
mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
}
3842 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3843 * @ioc: per adapter object
3844 * @phys_addr: lower 32 physical addr of the reply
3846 * Converts 32bit lower physical addr into a virt address.
3849 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER
*ioc
, u32 phys_addr
)
3853 return ioc
->reply
+ (phys_addr
- (u32
)ioc
->reply_dma
);
3857 * _base_get_msix_index - get the msix index
3858 * @ioc: per adapter object
3859 * @scmd: scsi_cmnd object
3861 * Return: msix index of general reply queues,
3862 * i.e. reply queue on which IO request's reply
3863 * should be posted by the HBA firmware.
3866 _base_get_msix_index(struct MPT3SAS_ADAPTER
*ioc
,
3867 struct scsi_cmnd
*scmd
)
3869 /* Enables reply_queue load balancing */
3870 if (ioc
->msix_load_balance
)
3871 return ioc
->reply_queue_count
?
3872 base_mod64(atomic64_add_return(1,
3873 &ioc
->total_io_cnt
), ioc
->reply_queue_count
) : 0;
3875 if (scmd
&& ioc
->shost
->nr_hw_queues
> 1) {
3876 u32 tag
= blk_mq_unique_tag(scsi_cmd_to_rq(scmd
));
3878 return blk_mq_unique_tag_to_hwq(tag
) +
3879 ioc
->high_iops_queues
;
3882 return ioc
->cpu_msix_table
[raw_smp_processor_id()];
3886 * _base_get_high_iops_msix_index - get the msix index of
3888 * @ioc: per adapter object
3889 * @scmd: scsi_cmnd object
3891 * Return: msix index of high iops reply queues.
3892 * i.e. high iops reply queue on which IO request's
3893 * reply should be posted by the HBA firmware.
3896 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER
*ioc
,
3897 struct scsi_cmnd
*scmd
)
3900 * Round robin the IO interrupts among the high iops
3901 * reply queues in terms of batch count 16 when outstanding
3902 * IOs on the target device is >=8.
3905 if (scsi_device_busy(scmd
->device
) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH
)
3907 atomic64_add_return(1, &ioc
->high_iops_outstanding
) /
3908 MPT3SAS_HIGH_IOPS_BATCH_COUNT
),
3909 MPT3SAS_HIGH_IOPS_REPLY_QUEUES
);
3911 return _base_get_msix_index(ioc
, scmd
);
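/*
 * Worked example of the batching above (assuming a batch count of 16 and
 * MPT3SAS_HIGH_IOPS_REPLY_QUEUES of 8): roughly every 16 consecutive
 * submissions share one high-iops reply queue before the index advances to
 * the next one, cycling back to queue 0 after all 8 queues have been used.
 */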
3915 * mpt3sas_base_get_smid - obtain a free smid from internal queue
3916 * @ioc: per adapter object
3917 * @cb_idx: callback index
3919 * Return: smid (zero is invalid)
3922 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER
*ioc
, u8 cb_idx
)
3924 unsigned long flags
;
3925 struct request_tracker
*request
;
3928 spin_lock_irqsave(&ioc
->scsi_lookup_lock
, flags
);
3929 if (list_empty(&ioc
->internal_free_list
)) {
3930 spin_unlock_irqrestore(&ioc
->scsi_lookup_lock
, flags
);
3931 ioc_err(ioc
, "%s: smid not available\n", __func__
);
3935 request
= list_entry(ioc
->internal_free_list
.next
,
3936 struct request_tracker
, tracker_list
);
3937 request
->cb_idx
= cb_idx
;
3938 smid
= request
->smid
;
3939 list_del(&request
->tracker_list
);
3940 spin_unlock_irqrestore(&ioc
->scsi_lookup_lock
, flags
);
3945 * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3946 * @ioc: per adapter object
3947 * @cb_idx: callback index
3948 * @scmd: pointer to scsi command object
3950 * Return: smid (zero is invalid)
3953 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER
*ioc
, u8 cb_idx
,
3954 struct scsi_cmnd
*scmd
)
3956 struct scsiio_tracker
*request
= scsi_cmd_priv(scmd
);
3958 u32 tag
, unique_tag
;
3960 unique_tag
= blk_mq_unique_tag(scsi_cmd_to_rq(scmd
));
3961 tag
= blk_mq_unique_tag_to_tag(unique_tag
);
3964 * Store hw queue number corresponding to the tag.
3965 * This hw queue number is used later to determine
3966 * the unique_tag using the logic below. This unique_tag
3967 * is used to retrieve the scmd pointer corresponding
3968 * to tag using scsi_host_find_tag() API.
3971 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
3973 ioc
->io_queue_num
[tag
] = blk_mq_unique_tag_to_hwq(unique_tag
);
3976 request
->cb_idx
= cb_idx
;
3977 request
->smid
= smid
;
3978 request
->scmd
= scmd
;
3979 INIT_LIST_HEAD(&request
->chain_list
);
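/*
 * Illustrative sketch (hypothetical helper, not driver code): the hw queue
 * number saved in ioc->io_queue_num[] above lets an scmd be recovered later
 * from a bare tag, mirroring the formula quoted in the comment.
 */
static struct scsi_cmnd *
example_scmd_from_tag(struct MPT3SAS_ADAPTER *ioc, u16 tag)
{
	u32 unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

	return scsi_host_find_tag(ioc->shost, unique_tag);
}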
3984 * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3985 * @ioc: per adapter object
3986 * @cb_idx: callback index
3988 * Return: smid (zero is invalid)
3991 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER
*ioc
, u8 cb_idx
)
3993 unsigned long flags
;
3994 struct request_tracker
*request
;
3997 spin_lock_irqsave(&ioc
->scsi_lookup_lock
, flags
);
3998 if (list_empty(&ioc
->hpr_free_list
)) {
3999 spin_unlock_irqrestore(&ioc
->scsi_lookup_lock
, flags
);
4003 request
= list_entry(ioc
->hpr_free_list
.next
,
4004 struct request_tracker
, tracker_list
);
4005 request
->cb_idx
= cb_idx
;
4006 smid
= request
->smid
;
4007 list_del(&request
->tracker_list
);
4008 spin_unlock_irqrestore(&ioc
->scsi_lookup_lock
, flags
);
4013 _base_recovery_check(struct MPT3SAS_ADAPTER
*ioc
)
4016 * See _wait_for_commands_to_complete() call with regards to this code.
4018 if (ioc
->shost_recovery
&& ioc
->pending_io_count
) {
4019 ioc
->pending_io_count
= scsi_host_busy(ioc
->shost
);
4020 if (ioc
->pending_io_count
== 0)
4021 wake_up(&ioc
->reset_wq
);
4025 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER
*ioc
,
4026 struct scsiio_tracker
*st
)
4028 if (WARN_ON(st
->smid
== 0))
4033 atomic_set(&ioc
->chain_lookup
[st
->smid
- 1].chain_offset
, 0);
4038 * mpt3sas_base_free_smid - put smid back on free_list
4039 * @ioc: per adapter object
4040 * @smid: system request message index
4043 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER
*ioc
, u16 smid
)
4045 unsigned long flags
;
4048 if (smid
< ioc
->hi_priority_smid
) {
4049 struct scsiio_tracker
*st
;
4052 st
= _get_st_from_smid(ioc
, smid
);
4054 _base_recovery_check(ioc
);
4058 /* Clear MPI request frame */
4059 request
= mpt3sas_base_get_msg_frame(ioc
, smid
);
4060 memset(request
, 0, ioc
->request_sz
);
4062 mpt3sas_base_clear_st(ioc
, st
);
4063 _base_recovery_check(ioc
);
4064 ioc
->io_queue_num
[smid
- 1] = 0;
4068 spin_lock_irqsave(&ioc
->scsi_lookup_lock
, flags
);
4069 if (smid
< ioc
->internal_smid
) {
4071 i
= smid
- ioc
->hi_priority_smid
;
4072 ioc
->hpr_lookup
[i
].cb_idx
= 0xFF;
4073 list_add(&ioc
->hpr_lookup
[i
].tracker_list
, &ioc
->hpr_free_list
);
4074 } else if (smid
<= ioc
->hba_queue_depth
) {
4075 /* internal queue */
4076 i
= smid
- ioc
->internal_smid
;
4077 ioc
->internal_lookup
[i
].cb_idx
= 0xFF;
4078 list_add(&ioc
->internal_lookup
[i
].tracker_list
,
4079 &ioc
->internal_free_list
);
4081 spin_unlock_irqrestore(&ioc
->scsi_lookup_lock
, flags
);
4085 * _base_mpi_ep_writeq - 32 bit write to MMIO
4087 * @addr: address in MMIO space
4088 * @writeq_lock: spin lock
4090 * This is special handling for the MPI EP to take care of 32-bit
4091 * environments where it is not guaranteed that the entire word is sent in one transfer.
_base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
	spinlock_t *writeq_lock)
{
	unsigned long flags;

	spin_lock_irqsave(writeq_lock, flags);
	__raw_writel((u32)(b), addr);
	__raw_writel((u32)(b >> 32), (addr + 4));
	spin_unlock_irqrestore(writeq_lock, flags);
}
4107 * _base_writeq - 64 bit write to MMIO
4109 * @addr: address in MMIO space
4110 * @writeq_lock: spin lock
4112 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
4113 * care of 32-bit environments where it is not guaranteed that the entire word is sent in one transfer.
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	__raw_writeq(b, addr);
}
#else
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	_base_mpi_ep_writeq(b, addr, writeq_lock);
}
#endif
4133 * _base_set_and_get_msix_index - get the msix index and assign to msix_io
4134 * variable of scsi tracker
4135 * @ioc: per adapter object
4136 * @smid: system request message index
4138 * Return: msix index.
4141 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER
*ioc
, u16 smid
)
4143 struct scsiio_tracker
*st
= NULL
;
4145 if (smid
< ioc
->hi_priority_smid
)
4146 st
= _get_st_from_smid(ioc
, smid
);
4149 return _base_get_msix_index(ioc
, NULL
);
4151 st
->msix_io
= ioc
->get_msix_index_for_smlio(ioc
, st
->scmd
);
/**
 * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
static void
_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
	u16 smid, u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;
	void *mpi_req_iomem;
	__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

	_clone_sg_entries(ioc, (void *)mfp, smid);
	mpi_req_iomem = (void __force *)ioc->chip +
			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
	_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
					ioc->request_sz);
	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	_base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}
/**
 * _base_put_smid_scsi_io - send SCSI_IO request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
static void
_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}
/**
 * _base_put_smid_fast_path - send fast path request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle
 */
static void
_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	descriptor.SCSIIO.RequestFlags =
	    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}
/**
 * _base_put_smid_hi_priority - send Task Management request to firmware
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
 */
static void
_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 msix_task)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	void *mpi_req_iomem;
	u64 *request;

	if (ioc->is_mcpu_endpoint) {
		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

		/* TBD 256 is offset within sys register. */
		mpi_req_iomem = (void __force *)ioc->chip
				+ MPI_FRAME_START_OFFSET
				+ (smid * ioc->request_sz);
		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
					ioc->request_sz);
	}

	request = (u64 *)&descriptor;

	descriptor.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	descriptor.HighPriority.MSIxIndex = msix_task;
	descriptor.HighPriority.SMID = cpu_to_le16(smid);
	descriptor.HighPriority.LMID = 0;
	descriptor.HighPriority.Reserved1 = 0;
	if (ioc->is_mcpu_endpoint)
		_base_mpi_ep_writeq(*request,
		    &ioc->chip->RequestDescriptorPostLow,
		    &ioc->scsi_lookup_lock);
	else
		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
		    &ioc->scsi_lookup_lock);
}
/**
 * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
 *	firmware
 * @ioc: per adapter object
 * @smid: system request message index
 */
void
mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;

	descriptor.Default.RequestFlags =
	    MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
	descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.Default.SMID = cpu_to_le16(smid);
	descriptor.Default.LMID = 0;
	descriptor.Default.DescriptorTypeDependent = 0;
	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}
/**
 * _base_put_smid_default - Default, primarily used for config pages
 * @ioc: per adapter object
 * @smid: system request message index
 */
static void
_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	void *mpi_req_iomem;
	u64 *request;

	if (ioc->is_mcpu_endpoint) {
		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

		_clone_sg_entries(ioc, (void *)mfp, smid);
		/* TBD 256 is offset within sys register */
		mpi_req_iomem = (void __force *)ioc->chip +
			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
					ioc->request_sz);
	}

	request = (u64 *)&descriptor;

	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.Default.SMID = cpu_to_le16(smid);
	descriptor.Default.LMID = 0;
	descriptor.Default.DescriptorTypeDependent = 0;
	if (ioc->is_mcpu_endpoint)
		_base_mpi_ep_writeq(*request,
		    &ioc->chip->RequestDescriptorPostLow,
		    &ioc->scsi_lookup_lock);
	else
		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
		    &ioc->scsi_lookup_lock);
}
/**
 * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
 *	Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle, unused in this function, for function type match
 */
static void
_base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 handle)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
 * _base_put_smid_fast_path_atomic - send fast path request to firmware
 *	using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @handle: device handle, unused in this function, for function type match
 */
static void
_base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 handle)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
 * _base_put_smid_hi_priority_atomic - send Task Management request to
 *	firmware using Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
 */
static void
_base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 msix_task)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	descriptor.MSIxIndex = msix_task;
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
/**
 * _base_put_smid_default_atomic - Default, primarily used for config pages
 *	use Atomic Request Descriptor
 * @ioc: per adapter object
 * @smid: system request message index
 */
static void
_base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi26AtomicRequestDescriptor_t descriptor;
	u32 *request = (u32 *)&descriptor;

	descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
	descriptor.SMID = cpu_to_le16(smid);

	writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
}
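/*
 * Note: the *_atomic variants above post through the single 32-bit
 * AtomicRequestDescriptorPost register with a plain writel(), so no lock is
 * needed; the non-atomic variants build a full 64-bit descriptor and post it
 * at RequestDescriptorPostLow via _base_writeq() (or its spinlock-protected
 * 32-bit fallback on MPI endpoint / 32-bit configurations).
 */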
/**
 * _base_display_OEMs_branding - Display branding string
 * @ioc: per adapter object
 */
static void
_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
{
	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
		return;

	switch (ioc->pdev->subsystem_vendor) {
	case PCI_VENDOR_ID_INTEL:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2008:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_INTEL_RMS2LL080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS2LL080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS2LL040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS2LL040_BRANDING);
				break;
			case MPT2SAS_INTEL_SSD910_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_SSD910_BRANDING);
				break;
			default:
				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI2_MFGPAGE_DEVID_SAS2308_2:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_INTEL_RS25GB008_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RS25GB008_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25JB080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25JB080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25JB040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25JB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25KB080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25KB080_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25KB040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25KB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25LB040_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25LB040_BRANDING);
				break;
			case MPT2SAS_INTEL_RMS25LB080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_INTEL_RMS25LB080_BRANDING);
				break;
			default:
				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI25_MFGPAGE_DEVID_SAS3008:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_INTEL_RMS3JC080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RMS3JC080_BRANDING);
				break;
			case MPT3SAS_INTEL_RS3GC008_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RS3GC008_BRANDING);
				break;
			case MPT3SAS_INTEL_RS3FC044_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RS3FC044_BRANDING);
				break;
			case MPT3SAS_INTEL_RS3UC080_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_INTEL_RS3UC080_BRANDING);
				break;
			default:
				ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
				 ioc->pdev->subsystem_device);
			break;
		}
		break;
	case PCI_VENDOR_ID_DELL:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2008:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
				break;
			case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
				break;
			case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
				break;
			case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
				break;
			case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
				break;
			case MPT2SAS_DELL_PERC_H200_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_PERC_H200_BRANDING);
				break;
			case MPT2SAS_DELL_6GBPS_SAS_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
				break;
			default:
				ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI25_MFGPAGE_DEVID_SAS3008:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_DELL_12G_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_DELL_12G_HBA_BRANDING);
				break;
			default:
				ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
				 ioc->pdev->subsystem_device);
			break;
		}
		break;
	case PCI_VENDOR_ID_CISCO:
		switch (ioc->pdev->device) {
		case MPI25_MFGPAGE_DEVID_SAS3008:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
				break;
			case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
				break;
			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
				break;
			default:
				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI25_MFGPAGE_DEVID_SAS3108_1:
			switch (ioc->pdev->subsystem_device) {
			case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
				break;
			case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
				break;
			default:
				ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
				 ioc->pdev->subsystem_device);
			break;
		}
		break;
	case MPT2SAS_HP_3PAR_SSVID:
		switch (ioc->pdev->device) {
		case MPI2_MFGPAGE_DEVID_SAS2004:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
				break;
			default:
				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		case MPI2_MFGPAGE_DEVID_SAS2308_2:
			switch (ioc->pdev->subsystem_device) {
			case MPT2SAS_HP_2_4_INTERNAL_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
				break;
			case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
				ioc_info(ioc, "%s\n",
					 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
				break;
			default:
				ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
					 ioc->pdev->subsystem_device);
				break;
			}
			break;
		default:
			ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
				 ioc->pdev->subsystem_device);
			break;
		}
		break;
	default:
		break;
	}
}
/**
 * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
 *	version from FW Image Header.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2FWImageHeader_t *fw_img_hdr;
	Mpi26ComponentImageHeader_t *cmp_img_hdr;
	Mpi25FWUploadRequest_t *mpi_request;
	Mpi2FWUploadReply_t mpi_reply;
	int r = 0, issue_diag_reset = 0;
	u32 package_version = 0;
	void *fwpkg_data = NULL;
	dma_addr_t fwpkg_data_dma;
	u16 smid, ioc_status;
	size_t data_length;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc_err(ioc, "%s: internal command already in use\n", __func__);
		return -EAGAIN;
	}

	data_length = sizeof(Mpi2FWImageHeader_t);
	fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
			&fwpkg_data_dma, GFP_KERNEL);
	if (!fwpkg_data) {
		ioc_err(ioc,
		    "Memory allocation for fwpkg data failed at %s:%d/%s()!\n",
		    __FILE__, __LINE__, __func__);
		return -ENOMEM;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
		r = -EAGAIN;
		goto out;
	}

	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
	mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
	mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
	mpi_request->ImageSize = cpu_to_le32(data_length);
	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
	    data_length);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	/* Wait for 15 seconds */
	wait_for_completion_timeout(&ioc->base_cmds.done,
			FW_IMG_HDR_READ_TIMEOUT*HZ);
	ioc_info(ioc, "%s: complete\n", __func__);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		ioc_err(ioc, "%s: timeout\n", __func__);
		_debug_dump_mf(mpi_request,
			       sizeof(Mpi25FWUploadRequest_t)/4);
		issue_diag_reset = 1;
	} else {
		memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
		if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
			memcpy(&mpi_reply, ioc->base_cmds.reply,
			       sizeof(Mpi2FWUploadReply_t));
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
				fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data;
				if (le32_to_cpu(fw_img_hdr->Signature) ==
				    MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) {
					cmp_img_hdr =
					    (Mpi26ComponentImageHeader_t *)
					    (fwpkg_data);
					package_version = le32_to_cpu(
					    cmp_img_hdr->ApplicationSpecific);
				} else
					package_version = le32_to_cpu(
					    fw_img_hdr->PackageVersion.Word);
				if (package_version)
					ioc_info(ioc,
					    "FW Package Ver(%02d.%02d.%02d.%02d)\n",
					    ((package_version) & 0xFF000000) >> 24,
					    ((package_version) & 0x00FF0000) >> 16,
					    ((package_version) & 0x0000FF00) >> 8,
					    (package_version) & 0x000000FF);
			} else
				_debug_dump_mf(&mpi_reply,
				    sizeof(Mpi2FWUploadReply_t)/4);
		}
	}
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
out:
	if (fwpkg_data)
		dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
		    fwpkg_data_dma);
	if (issue_diag_reset) {
		if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
			return -EFAULT;
		if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
			return -EFAULT;
		r = -EAGAIN;
	}
	return r;
}
/**
 * _base_display_ioc_capabilities - Display IOC's capabilities.
 * @ioc: per adapter object
 */
static void
_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
{
	int i = 0;
	char desc[17] = {0};
	u32 iounit_pg1_flags;

	memtostr(desc, ioc->manu_pg0.ChipName);
	ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
		 desc,
		 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
		 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
		 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
		 ioc->facts.FWVersion.Word & 0x000000FF,
		 ioc->pdev->revision);

	_base_display_OEMs_branding(ioc);

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
		pr_info("%sNVMe", i ? "," : "");
		i++;
	}

	ioc_info(ioc, "Protocol=(");

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
		pr_cont("Initiator");
		i++;
	}

	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
		pr_cont("%sTarget", i ? "," : "");
		i++;
	}

	i = 0;
	pr_cont("), Capabilities=(");

	if (!ioc->hide_ir_msg) {
		if (ioc->facts.IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
			pr_cont("Raid");
			i++;
		}
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
		pr_cont("%sTLR", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
		pr_cont("%sMulticast", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
		pr_cont("%sBIDI Target", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
		pr_cont("%sEEDP", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
		pr_cont("%sSnapshot Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
		pr_cont("%sDiag Trace Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
		pr_cont("%sDiag Extended Buffer", i ? "," : "");
		i++;
	}

	if (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
		pr_cont("%sTask Set Full", i ? "," : "");
		i++;
	}

	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
		pr_cont("%sNCQ", i ? "," : "");
		i++;
	}
	pr_cont(")\n");
}
/**
 * mpt3sas_base_update_missing_delay - change the missing delay timers
 * @ioc: per adapter object
 * @device_missing_delay: amount of time till device is reported missing
 * @io_missing_delay: interval IO is returned when there is a missing device
 *
 * Passed on the command line, this function will modify the device missing
 * delay, as well as the io missing delay. This should be called at driver
 * load time.
 */
void
mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
	u16 device_missing_delay, u8 io_missing_delay)
{
	u16 dmd, dmd_new, dmd_orignal;
	u8 io_missing_delay_original;
	u16 sz;
	Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
	Mpi2ConfigReply_t mpi_reply;
	u8 num_phys = 0;
	u16 ioc_status;

	mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
	if (!num_phys)
		return;

	sz = struct_size(sas_iounit_pg1, PhyData, num_phys);
	sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
	if (!sas_iounit_pg1) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    sas_iounit_pg1, sz))) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		ioc_err(ioc, "failure at %s:%d/%s()!\n",
			__FILE__, __LINE__, __func__);
		goto out;
	}

	/* device missing delay */
	dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
	if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
	else
		dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
	dmd_orignal = dmd;
	if (device_missing_delay > 0x7F) {
		dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
		    device_missing_delay;
		dmd = dmd / 16;
		dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
	} else
		dmd = device_missing_delay;
	sas_iounit_pg1->ReportDeviceMissingDelay = dmd;

	/* io missing delay */
	io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
	sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;

	if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
	    sz)) {
		if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
			dmd_new = (dmd &
			    MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
		else
			dmd_new =
			    dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
		ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
			 dmd_orignal, dmd_new);
		ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
			 io_missing_delay_original,
			 io_missing_delay);
		ioc->device_missing_delay = dmd_new;
		ioc->io_missing_delay = io_missing_delay;
	}

out:
	kfree(sas_iounit_pg1);
}
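/*
 * Worked example of the encoding above: a requested device_missing_delay of
 * 300 seconds exceeds 0x7F, so it is clamped to at most 0x7F0 (2032), divided
 * by 16 (300 / 16 = 18) and stored with the UNIT_16 flag set; reading it back
 * yields 18 * 16 = 288 seconds, which is what is reported as the "new" value.
 */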
/**
 * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
 *	according to performance mode.
 * @ioc : per adapter object
 *
 * Return: zero on success; otherwise return EAGAIN error code asking the
 * caller to retry.
 */
static int
_base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCPage1_t ioc_pg1;
	Mpi2ConfigReply_t mpi_reply;
	int rc;

	rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
	if (rc)
		return rc;
	memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));

	switch (perf_mode) {
	case MPT_PERF_MODE_DEFAULT:
	case MPT_PERF_MODE_BALANCED:
		if (ioc->high_iops_queues) {
			ioc_info(ioc,
			    "Enable interrupt coalescing only for first %d reply queues\n",
			    MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
			/*
			 * If 31st bit is zero then interrupt coalescing is
			 * enabled for all reply descriptor post queues.
			 * If 31st bit is set to one then user can
			 * enable/disable interrupt coalescing on per reply
			 * descriptor post queue group(8) basis. So to enable
			 * interrupt coalescing only on first reply descriptor
			 * post queue group 31st bit and zero th bit is enabled.
			 */
			ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
			    ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
			rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
			if (rc)
				return rc;
			ioc_info(ioc, "performance mode: balanced\n");
			return 0;
		}
		fallthrough;
	case MPT_PERF_MODE_LATENCY:
		/*
		 * Enable interrupt coalescing on all reply queues
		 * with timeout value 0xA
		 */
		ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
		ioc_pg1.ProductSpecific = 0;
		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
		if (rc)
			return rc;
		ioc_info(ioc, "performance mode: latency\n");
		break;
	case MPT_PERF_MODE_IOPS:
		/*
		 * Enable interrupt coalescing on all reply queues.
		 */
		ioc_info(ioc,
		    "performance mode: iops with coalescing timeout: 0x%x\n",
		    le32_to_cpu(ioc_pg1.CoalescingTimeout));
		ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
		ioc_pg1.ProductSpecific = 0;
		rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
		if (rc)
			return rc;
		ioc_info(ioc, "performance mode: iops\n");
		break;
	}
	return 0;
}
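/*
 * Example of the balanced-mode ProductSpecific value computed above,
 * assuming MPT3SAS_HIGH_IOPS_REPLY_QUEUES is 8: bit 31 selects per-group
 * control and ((1 << 8/8) - 1) = 0x1 enables coalescing only for the first
 * group of 8 reply descriptor post queues, giving 0x80000001.
 */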
/**
 * _base_get_event_diag_triggers - get event diag trigger values from
 *	persistent pages
 * @ioc : per adapter object
 *
 * Return: 0 on success; otherwise return failure status.
 */
static int
_base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26DriverTriggerPage2_t trigger_pg2;
	struct SL_WH_EVENT_TRIGGER_T *event_tg;
	MPI26_DRIVER_MPI_EVENT_TRIGGER_ENTRY *mpi_event_tg;
	Mpi2ConfigReply_t mpi_reply;
	int r = 0, i = 0;
	u16 count = 0;
	u16 ioc_status;

	r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
	    &trigger_pg2);
	if (r)
		return r;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		dinitprintk(ioc,
		    ioc_err(ioc,
		    "%s: Failed to get trigger pg2, ioc_status(0x%04x)\n",
		    __func__, ioc_status));
		return 0;
	}

	if (le16_to_cpu(trigger_pg2.NumMPIEventTrigger)) {
		count = le16_to_cpu(trigger_pg2.NumMPIEventTrigger);
		count = min_t(u16, NUM_VALID_ENTRIES, count);
		ioc->diag_trigger_event.ValidEntries = count;

		event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
		mpi_event_tg = &trigger_pg2.MPIEventTriggers[0];
		for (i = 0; i < count; i++) {
			event_tg->EventValue = le16_to_cpu(
			    mpi_event_tg->MPIEventCode);
			event_tg->LogEntryQualifier = le16_to_cpu(
			    mpi_event_tg->MPIEventCodeSpecific);
			event_tg++;
			mpi_event_tg++;
		}
	}
	return 0;
}
/**
 * _base_get_scsi_diag_triggers - get scsi diag trigger values from
 *	persistent pages
 * @ioc : per adapter object
 *
 * Return: 0 on success; otherwise return failure status.
 */
static int
_base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26DriverTriggerPage3_t trigger_pg3;
	struct SL_WH_SCSI_TRIGGER_T *scsi_tg;
	MPI26_DRIVER_SCSI_SENSE_TRIGGER_ENTRY *mpi_scsi_tg;
	Mpi2ConfigReply_t mpi_reply;
	int r = 0, i = 0;
	u16 count = 0;
	u16 ioc_status;

	r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
	    &trigger_pg3);
	if (r)
		return r;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		dinitprintk(ioc,
		    ioc_err(ioc,
		    "%s: Failed to get trigger pg3, ioc_status(0x%04x)\n",
		    __func__, ioc_status));
		return 0;
	}

	if (le16_to_cpu(trigger_pg3.NumSCSISenseTrigger)) {
		count = le16_to_cpu(trigger_pg3.NumSCSISenseTrigger);
		count = min_t(u16, NUM_VALID_ENTRIES, count);
		ioc->diag_trigger_scsi.ValidEntries = count;

		scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
		mpi_scsi_tg = &trigger_pg3.SCSISenseTriggers[0];
		for (i = 0; i < count; i++) {
			scsi_tg->ASCQ = mpi_scsi_tg->ASCQ;
			scsi_tg->ASC = mpi_scsi_tg->ASC;
			scsi_tg->SenseKey = mpi_scsi_tg->SenseKey;
			scsi_tg++;
			mpi_scsi_tg++;
		}
	}
	return 0;
}
/**
 * _base_get_mpi_diag_triggers - get mpi diag trigger values from
 *	persistent pages
 * @ioc : per adapter object
 *
 * Return: 0 on success; otherwise return failure status.
 */
static int
_base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26DriverTriggerPage4_t trigger_pg4;
	struct SL_WH_MPI_TRIGGER_T *status_tg;
	MPI26_DRIVER_IOCSTATUS_LOGINFO_TRIGGER_ENTRY *mpi_status_tg;
	Mpi2ConfigReply_t mpi_reply;
	int r = 0, i = 0;
	u16 count = 0;
	u16 ioc_status;

	r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
	    &trigger_pg4);
	if (r)
		return r;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		dinitprintk(ioc,
		    ioc_err(ioc,
		    "%s: Failed to get trigger pg4, ioc_status(0x%04x)\n",
		    __func__, ioc_status));
		return 0;
	}

	if (le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger)) {
		count = le16_to_cpu(trigger_pg4.NumIOCStatusLogInfoTrigger);
		count = min_t(u16, NUM_VALID_ENTRIES, count);
		ioc->diag_trigger_mpi.ValidEntries = count;

		status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
		mpi_status_tg = &trigger_pg4.IOCStatusLoginfoTriggers[0];

		for (i = 0; i < count; i++) {
			status_tg->IOCStatus = le16_to_cpu(
			    mpi_status_tg->IOCStatus);
			status_tg->IocLogInfo = le32_to_cpu(
			    mpi_status_tg->LogInfo);
			status_tg++;
			mpi_status_tg++;
		}
	}
	return 0;
}
/**
 * _base_get_master_diag_triggers - get master diag trigger values from
 *	persistent pages
 * @ioc : per adapter object
 *
 * Return: 0 on success; otherwise return failure status.
 */
static int
_base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi26DriverTriggerPage1_t trigger_pg1;
	Mpi2ConfigReply_t mpi_reply;
	int r;
	u16 ioc_status;

	r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
	    &trigger_pg1);
	if (r)
		return r;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		dinitprintk(ioc,
		    ioc_err(ioc,
		    "%s: Failed to get trigger pg1, ioc_status(0x%04x)\n",
		    __func__, ioc_status));
		return 0;
	}

	if (le16_to_cpu(trigger_pg1.NumMasterTrigger))
		ioc->diag_trigger_master.MasterData |=
		    le32_to_cpu(
		    trigger_pg1.MasterTriggers[0].MasterTriggerFlags);
	return 0;
}
/**
 * _base_check_for_trigger_pages_support - checks whether HBA FW supports
 *	driver trigger pages or not
 * @ioc : per adapter object
 * @trigger_flags : address where trigger page0's TriggerFlags value is copied
 *
 * Return: trigger flags mask if HBA FW supports driver trigger pages;
 * otherwise returns %-EFAULT if driver trigger pages are not supported by FW,
 * or %-EAGAIN if a diag reset occurred due to an FW fault, asking the
 * caller to retry the command.
 */
static int
_base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
{
	Mpi26DriverTriggerPage0_t trigger_pg0;
	int r = 0;
	Mpi2ConfigReply_t mpi_reply;
	u16 ioc_status;

	r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
	    &trigger_pg0);
	if (r)
		return r;

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		return -EFAULT;

	*trigger_flags = le16_to_cpu(trigger_pg0.TriggerFlags);
	return 0;
}
/**
 * _base_get_diag_triggers - Retrieve diag trigger values from
 *	persistent pages.
 * @ioc : per adapter object
 *
 * Return: zero on success; otherwise return EAGAIN error codes
 * asking the caller to retry.
 */
static int
_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
	u32 trigger_flags = 0;
	int r;

	/*
	 * Default setting of master trigger.
	 */
	ioc->diag_trigger_master.MasterData =
	    (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);

	r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
	if (r) {
		if (r == -EAGAIN)
			return r;
		/*
		 * Don't go for error handling when FW doesn't support
		 * driver trigger pages.
		 */
		return 0;
	}

	ioc->supports_trigger_pages = 1;

	/*
	 * Retrieve master diag trigger values from driver trigger pg1
	 * if master trigger bit enabled in TriggerFlags.
	 */
	if ((u16)trigger_flags &
	    MPI26_DRIVER_TRIGGER0_FLAG_MASTER_TRIGGER_VALID) {
		r = _base_get_master_diag_triggers(ioc);
		if (r)
			return r;
	}

	/*
	 * Retrieve event diag trigger values from driver trigger pg2
	 * if event trigger bit enabled in TriggerFlags.
	 */
	if ((u16)trigger_flags &
	    MPI26_DRIVER_TRIGGER0_FLAG_MPI_EVENT_TRIGGER_VALID) {
		r = _base_get_event_diag_triggers(ioc);
		if (r)
			return r;
	}

	/*
	 * Retrieve scsi diag trigger values from driver trigger pg3
	 * if scsi trigger bit enabled in TriggerFlags.
	 */
	if ((u16)trigger_flags &
	    MPI26_DRIVER_TRIGGER0_FLAG_SCSI_SENSE_TRIGGER_VALID) {
		r = _base_get_scsi_diag_triggers(ioc);
		if (r)
			return r;
	}

	/*
	 * Retrieve mpi error diag trigger values from driver trigger pg4
	 * if loginfo trigger bit enabled in TriggerFlags.
	 */
	if ((u16)trigger_flags &
	    MPI26_DRIVER_TRIGGER0_FLAG_LOGINFO_TRIGGER_VALID) {
		r = _base_get_mpi_diag_triggers(ioc);
		if (r)
			return r;
	}
	return 0;
}
/**
 * _base_update_diag_trigger_pages - Update the driver trigger pages after
 *	online FW update, in case updated FW supports driver
 *	trigger pages.
 * @ioc : per adapter object
 */
static void
_base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
{
	if (ioc->diag_trigger_master.MasterData)
		mpt3sas_config_update_driver_trigger_pg1(ioc,
		    &ioc->diag_trigger_master, 1);

	if (ioc->diag_trigger_event.ValidEntries)
		mpt3sas_config_update_driver_trigger_pg2(ioc,
		    &ioc->diag_trigger_event, 1);

	if (ioc->diag_trigger_scsi.ValidEntries)
		mpt3sas_config_update_driver_trigger_pg3(ioc,
		    &ioc->diag_trigger_scsi, 1);

	if (ioc->diag_trigger_mpi.ValidEntries)
		mpt3sas_config_update_driver_trigger_pg4(ioc,
		    &ioc->diag_trigger_mpi, 1);
}
/**
 * _base_assign_fw_reported_qd - Get FW reported QD for SAS/SATA devices.
 *				- On failure set default QD values.
 * @ioc : per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	Mpi2SasIOUnitPage1_t sas_iounit_pg1;
	Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1;
	u16 depth;
	int rc = 0;

	ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
	ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
	ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
	ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
	if (!ioc->is_gen35_ioc)
		goto out;
	/* sas iounit page 1 */
	rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
	    &sas_iounit_pg1, sizeof(Mpi2SasIOUnitPage1_t));
	if (rc) {
		pr_err("%s: failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}

	depth = le16_to_cpu(sas_iounit_pg1.SASWideMaxQueueDepth);
	ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);

	depth = le16_to_cpu(sas_iounit_pg1.SASNarrowMaxQueueDepth);
	ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);

	depth = sas_iounit_pg1.SATAMaxQDepth;
	ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);

	/* pcie iounit page 1 */
	rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
	    &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t));
	if (rc) {
		pr_err("%s: failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		goto out;
	}
	ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
	    (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) :
	    MPT3SAS_NVME_QUEUE_DEPTH;
out:
	dinitprintk(ioc, pr_err(
	    "MaxWidePortQD: 0x%x MaxNarrowPortQD: 0x%x MaxSataQD: 0x%x MaxNvmeQD: 0x%x\n",
	    ioc->max_wideport_qd, ioc->max_narrowport_qd,
	    ioc->max_sata_qd, ioc->max_nvme_qd));
	return rc;
}
/**
 * mpt3sas_atto_validate_nvram - validate the ATTO nvram read from mfg pg1
 *
 * @ioc : per adapter object
 * @n : ptr to the ATTO nvram structure
 * Return: 0 for success, non-zero for failure.
 */
static int
mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
	struct ATTO_SAS_NVRAM *n)
{
	int r = -EINVAL;
	union ATTO_SAS_ADDRESS *s1;
	u32 len;
	u8 *pb;
	u8 ckSum;

	/* validate nvram checksum */
	pb = (u8 *) n;
	ckSum = ATTO_SASNVR_CKSUM_SEED;
	len = sizeof(struct ATTO_SAS_NVRAM);

	while (len--)
		ckSum = ckSum + pb[len];

	if (ckSum) {
		ioc_err(ioc, "Invalid ATTO NVRAM checksum\n");
		return r;
	}

	s1 = (union ATTO_SAS_ADDRESS *) n->SasAddr;

	if (n->Signature[0] != 'E'
	    || n->Signature[1] != 'S'
	    || n->Signature[2] != 'A'
	    || n->Signature[3] != 'S')
		ioc_err(ioc, "Invalid ATTO NVRAM signature\n");
	else if (n->Version > ATTO_SASNVR_VERSION)
		ioc_info(ioc, "Invalid ATTO NVRAM version");
	else if ((n->SasAddr[7] & (ATTO_SAS_ADDR_ALIGN - 1))
	    || (s1->b[3] & 0xF0) != 0x60
	    || ((s1->b[3] & 0x0F) | le32_to_cpu(s1->d[1])) == 0) {
		ioc_err(ioc, "Invalid ATTO SAS address\n");
	} else
		r = 0;

	return r;
}
/**
 * mpt3sas_atto_get_sas_addr - get the ATTO SAS address from mfg page 1
 *
 * @ioc : per adapter object
 * @sas_addr : return sas address
 * Return: 0 for success, non-zero for failure.
 */
static int
mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr)
{
	Mpi2ManufacturingPage1_t mfg_pg1;
	Mpi2ConfigReply_t mpi_reply;
	struct ATTO_SAS_NVRAM *nvram;
	int r;
	__be64 addr;

	r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1);
	if (r) {
		ioc_err(ioc, "Failed to read manufacturing page 1\n");
		return r;
	}

	/* validate nvram */
	nvram = (struct ATTO_SAS_NVRAM *) mfg_pg1.VPD;
	r = mpt3sas_atto_validate_nvram(ioc, nvram);
	if (r)
		return r;

	addr = *((__be64 *) nvram->SasAddr);
	sas_addr->q = cpu_to_le64(be64_to_cpu(addr));
	return 0;
}
/**
 * mpt3sas_atto_init - perform initialization for ATTO branded
 *	adapters.
 * @ioc : per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc)
{
	int sz = 0;
	Mpi2BiosPage4_t *bios_pg4 = NULL;
	Mpi2ConfigReply_t mpi_reply;
	int r;
	int ix;
	union ATTO_SAS_ADDRESS sas_addr;
	union ATTO_SAS_ADDRESS temp;
	union ATTO_SAS_ADDRESS bias;

	r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr);
	if (r)
		return r;

	/* get header first to get size */
	r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0);
	if (r) {
		ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n");
		return r;
	}

	sz = mpi_reply.Header.PageLength * sizeof(u32);
	bios_pg4 = kzalloc(sz, GFP_KERNEL);
	if (!bios_pg4) {
		ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n");
		return -ENOMEM;
	}

	/* read bios page 4 */
	r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
	if (r) {
		ioc_err(ioc, "Failed to read ATTO bios page 4\n");
		goto out;
	}

	/* Update bios page 4 with the ATTO WWID */
	bias.q = sas_addr.q;
	bias.b[7] += ATTO_SAS_ADDR_DEVNAME_BIAS;

	for (ix = 0; ix < bios_pg4->NumPhys; ix++) {
		temp.q = sas_addr.q;
		temp.b[7] += ix;
		bios_pg4->Phy[ix].ReassignmentWWID = temp.q;
		bios_pg4->Phy[ix].ReassignmentDeviceName = bias.q;
	}
	r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);

out:
	kfree(bios_pg4);
	return r;
}
/**
 * _base_static_config_pages - static start of day config pages
 * @ioc: per adapter object
 */
static int
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOUnitPage8_t iounit_pg8;
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;
	u32 tg_flags = 0;
	int rc;

	ioc->nvme_abort_timeout = 30;

	rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
	    &ioc->manu_pg0);
	if (rc)
		return rc;
	if (ioc->ir_firmware) {
		rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
		    &ioc->manu_pg10);
		if (rc)
			return rc;
	}

	if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
		rc = mpt3sas_atto_init(ioc);
		if (rc)
			return rc;
	}

	/*
	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
	 * flag unset in NVDATA.
	 */
	rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
	    &ioc->manu_pg11);
	if (rc)
		return rc;
	if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
		    ioc->name);
		ioc->manu_pg11.EEDPTagMode &= ~0x3;
		ioc->manu_pg11.EEDPTagMode |= 0x1;
		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
		    &ioc->manu_pg11);
	}
	if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
		ioc->tm_custom_handling = 1;
	else {
		ioc->tm_custom_handling = 0;
		if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
		else if (ioc->manu_pg11.NVMeAbortTO >
					NVME_TASK_ABORT_MAX_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
		else
			ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
	}
	ioc->time_sync_interval =
	    ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
	if (ioc->time_sync_interval) {
		if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
			ioc->time_sync_interval =
			    ioc->time_sync_interval * SECONDS_PER_HOUR;
		else
			ioc->time_sync_interval =
			    ioc->time_sync_interval * SECONDS_PER_MIN;
		dinitprintk(ioc, ioc_info(ioc,
		    "Driver-FW TimeSync interval is %d seconds. ManuPg11 TimeSync Unit is in %s\n",
		    ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
		    MPT3SAS_TIMESYNC_UNIT_MASK) ? "Hour" : "Minute"));
	} else {
		if (ioc->is_gen35_ioc)
			ioc_warn(ioc,
			    "TimeSync Interval in Manuf page-11 is not enabled. Periodic Time-Sync will be disabled\n");
	}
	rc = _base_assign_fw_reported_qd(ioc);
	if (rc)
		return rc;

	/*
	 * ATTO doesn't use bios page 2 and 3 for bios settings.
	 */
	if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO)
		ioc->bios_pg3.BiosVersion = 0;
	else {
		rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
		if (rc)
			return rc;

		rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
		if (rc)
			return rc;
	}

	rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	if (rc)
		return rc;

	rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
	if (rc)
		return rc;

	rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	if (rc)
		return rc;

	rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &iounit_pg8);
	if (rc)
		return rc;

	_base_display_ioc_capabilities(ioc);

	/*
	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that it is supported.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	if (rc)
		return rc;

	if (iounit_pg8.NumSensors)
		ioc->temp_sensors_count = iounit_pg8.NumSensors;
	if (ioc->is_aero_ioc) {
		rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
		if (rc)
			return rc;
	}
	if (ioc->is_gen35_ioc) {
		if (ioc->is_driver_loading) {
			rc = _base_get_diag_triggers(ioc);
			if (rc)
				return rc;
		} else {
			/*
			 * In case of online HBA FW update operation,
			 * check whether updated FW supports the driver trigger
			 * pages or not.
			 * - If previous FW has not supported driver trigger
			 *   pages and newer FW supports them then update these
			 *   pages with current diag trigger values.
			 * - If previous FW has supported driver trigger pages
			 *   and new FW doesn't support them then disable
			 *   support_trigger_pages flag.
			 */
			_base_check_for_trigger_pages_support(ioc, &tg_flags);
			if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
				_base_update_diag_trigger_pages(ioc);
			else if (ioc->supports_trigger_pages &&
			    tg_flags == -EFAULT)
				ioc->supports_trigger_pages = 0;
		}
	}
	return 0;
}
/**
 * mpt3sas_free_enclosure_list - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated during enclosure add.
 */
void
mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
{
	struct _enclosure_node *enclosure_dev, *enclosure_dev_next;

	/* Free enclosure list */
	list_for_each_entry_safe(enclosure_dev,
			enclosure_dev_next, &ioc->enclosure_list, list) {
		list_del(&enclosure_dev->list);
		kfree(enclosure_dev);
	}
}
/**
 * _base_release_memory_pools - release memory
 * @ioc: per adapter object
 *
 * Free memory allocated from _base_allocate_memory_pools.
 */
static void
_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	int i, j;
	int dma_alloc_count = 0;
	struct chain_tracker *ct;
	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;

	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->request) {
		dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
		    ioc->request, ioc->request_dma);
		dexitprintk(ioc,
			    ioc_info(ioc, "request_pool(0x%p): free\n",
				     ioc->request));
		ioc->request = NULL;
	}

	if (ioc->sense) {
		dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
		dma_pool_destroy(ioc->sense_dma_pool);
		dexitprintk(ioc,
			    ioc_info(ioc, "sense_pool(0x%p): free\n",
				     ioc->sense));
		ioc->sense = NULL;
	}

	if (ioc->reply) {
		dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
		dma_pool_destroy(ioc->reply_dma_pool);
		dexitprintk(ioc,
			    ioc_info(ioc, "reply_pool(0x%p): free\n",
				     ioc->reply));
		ioc->reply = NULL;
	}

	if (ioc->reply_free) {
		dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
		    ioc->reply_free_dma);
		dma_pool_destroy(ioc->reply_free_dma_pool);
		dexitprintk(ioc,
			    ioc_info(ioc, "reply_free_pool(0x%p): free\n",
				     ioc->reply_free));
		ioc->reply_free = NULL;
	}

	if (ioc->reply_post) {
		dma_alloc_count = DIV_ROUND_UP(count,
				RDPQ_MAX_INDEX_IN_ONE_CHUNK);
		for (i = 0; i < count; i++) {
			if (i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0
			    && dma_alloc_count) {
				if (ioc->reply_post[i].reply_post_free) {
					dma_pool_free(
					    ioc->reply_post_free_dma_pool,
					    ioc->reply_post[i].reply_post_free,
					    ioc->reply_post[i].reply_post_free_dma);
					dexitprintk(ioc, ioc_info(ioc,
					    "reply_post_free_pool(0x%p): free\n",
					    ioc->reply_post[i].reply_post_free));
					ioc->reply_post[i].reply_post_free =
					    NULL;
				}
				--dma_alloc_count;
			}
		}
		dma_pool_destroy(ioc->reply_post_free_dma_pool);
		if (ioc->reply_post_free_array &&
		    ioc->rdpq_array_enable) {
			dma_pool_free(ioc->reply_post_free_array_dma_pool,
			    ioc->reply_post_free_array,
			    ioc->reply_post_free_array_dma);
			ioc->reply_post_free_array = NULL;
		}
		dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
		kfree(ioc->reply_post);
	}

	if (ioc->pcie_sgl_dma_pool) {
		for (i = 0; i < ioc->scsiio_depth; i++) {
			dma_pool_free(ioc->pcie_sgl_dma_pool,
			    ioc->pcie_sg_lookup[i].pcie_sgl,
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
		}
		dma_pool_destroy(ioc->pcie_sgl_dma_pool);
	}
	kfree(ioc->pcie_sg_lookup);
	ioc->pcie_sg_lookup = NULL;

	if (ioc->config_page) {
		dexitprintk(ioc,
			    ioc_info(ioc, "config_page(0x%p): free\n",
				     ioc->config_page));
		dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
		    ioc->config_page, ioc->config_page_dma);
	}

	kfree(ioc->hpr_lookup);
	ioc->hpr_lookup = NULL;
	kfree(ioc->internal_lookup);
	ioc->internal_lookup = NULL;
	if (ioc->chain_lookup) {
		for (i = 0; i < ioc->scsiio_depth; i++) {
			for (j = ioc->chains_per_prp_buffer;
			    j < ioc->chains_needed_per_io; j++) {
				ct = &ioc->chain_lookup[i].chains_per_smid[j];
				if (ct && ct->chain_buffer)
					dma_pool_free(ioc->chain_dma_pool,
					    ct->chain_buffer,
					    ct->chain_buffer_dma);
			}
			kfree(ioc->chain_lookup[i].chains_per_smid);
		}
		dma_pool_destroy(ioc->chain_dma_pool);
		kfree(ioc->chain_lookup);
		ioc->chain_lookup = NULL;
	}

	kfree(ioc->io_queue_num);
	ioc->io_queue_num = NULL;
}
/**
 * mpt3sas_check_same_4gb_region - checks whether all reply queues in a set are
 *	having same upper 32bits in their base memory address.
 * @start_address: Base address of a reply queue set
 * @pool_sz: Size of single Reply Descriptor Post Queues pool size
 *
 * Return: 1 if reply queues in a set have a same upper 32bits in their base
 * memory address, else 0.
 */
static int
mpt3sas_check_same_4gb_region(dma_addr_t start_address, u32 pool_sz)
{
	dma_addr_t end_address;

	end_address = start_address + pool_sz - 1;

	if (upper_32_bits(start_address) == upper_32_bits(end_address))
		return 1;
	else
		return 0;
}
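/*
 * Example: a pool that starts at 0x1_FFFF_F000 with pool_sz = 0x2000 ends at
 * 0x2_0000_0FFF; upper_32_bits() differs (0x1 vs 0x2), so the allocation
 * crosses a 4GB boundary and the callers below fall back to re-allocating
 * (eventually with a 32-bit DMA mask via use_32bit_dma).
 */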
/**
 * _base_reduce_hba_queue_depth - Retry with reduced queue depth
 * @ioc: Adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static inline int
_base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
{
	int reduce_sz = 64;

	if ((ioc->hba_queue_depth - reduce_sz) >
	    (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
		ioc->hba_queue_depth -= reduce_sz;
		return 0;
	} else
		return -ENOMEM;
}
/**
 * _base_allocate_pcie_sgl_pool - Allocating DMA'able memory
 *	for pcie sgl pools.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	int i = 0, j = 0;
	struct chain_tracker *ct;

	ioc->pcie_sgl_dma_pool =
	    dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
	    ioc->page_size, 0);
	if (!ioc->pcie_sgl_dma_pool) {
		ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
		return -ENOMEM;
	}

	ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
	ioc->chains_per_prp_buffer =
	    min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
	for (i = 0; i < ioc->scsiio_depth; i++) {
		ioc->pcie_sg_lookup[i].pcie_sgl =
		    dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
		    &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
		if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
			ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
			return -EAGAIN;
		}

		if (!mpt3sas_check_same_4gb_region(
		    ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
			ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
			    ioc->pcie_sg_lookup[i].pcie_sgl,
			    (unsigned long long)
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma);
			ioc->use_32bit_dma = true;
			return -EAGAIN;
		}

		for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
			ct = &ioc->chain_lookup[i].chains_per_smid[j];
			ct->chain_buffer =
			    ioc->pcie_sg_lookup[i].pcie_sgl +
			    (j * ioc->chain_segment_sz);
			ct->chain_buffer_dma =
			    ioc->pcie_sg_lookup[i].pcie_sgl_dma +
			    (j * ioc->chain_segment_sz);
		}
	}
	dinitprintk(ioc, ioc_info(ioc,
	    "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
	dinitprintk(ioc, ioc_info(ioc,
	    "Number of chains can fit in a PRP page(%d)\n",
	    ioc->chains_per_prp_buffer));
	return 0;
}
/**
 * _base_allocate_chain_dma_pool - Allocating DMA'able memory
 *	for chain dma pool.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	int i = 0, j = 0;
	struct chain_tracker *ctr;

	ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
	    ioc->chain_segment_sz, 16, 0);
	if (!ioc->chain_dma_pool)
		return -ENOMEM;

	for (i = 0; i < ioc->scsiio_depth; i++) {
		for (j = ioc->chains_per_prp_buffer;
		    j < ioc->chains_needed_per_io; j++) {
			ctr = &ioc->chain_lookup[i].chains_per_smid[j];
			ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
			    GFP_KERNEL, &ctr->chain_buffer_dma);
			if (!ctr->chain_buffer)
				return -EAGAIN;
			if (!mpt3sas_check_same_4gb_region(
			    ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
				ioc_err(ioc,
				    "Chain buffers are not in same 4G !!! Chain buff (0x%p) dma = (0x%llx)\n",
				    ctr->chain_buffer,
				    (unsigned long long)ctr->chain_buffer_dma);
				ioc->use_32bit_dma = true;
				return -EAGAIN;
			}
		}
	}
	dinitprintk(ioc, ioc_info(ioc,
	    "chain_lookup depth (%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
	    (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
	    ioc->chain_segment_sz))/1024));
	return 0;
}
/**
 * _base_allocate_sense_dma_pool - Allocating DMA'able memory
 *	for sense dma pool.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	ioc->sense_dma_pool =
	    dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
	if (!ioc->sense_dma_pool)
		return -ENOMEM;
	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
	    GFP_KERNEL, &ioc->sense_dma);
	if (!ioc->sense)
		return -EAGAIN;
	if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
		dinitprintk(ioc, pr_err(
		    "Bad Sense Pool! sense (0x%p) sense_dma = (0x%llx)\n",
		    ioc->sense, (unsigned long long) ioc->sense_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	ioc_info(ioc,
	    "sense pool(0x%p) - dma(0x%llx): depth(%d), element_size(%d), pool_size (%d kB)\n",
	    ioc->sense, (unsigned long long)ioc->sense_dma,
	    ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
	return 0;
}
/**
 * _base_allocate_reply_pool - Allocating DMA'able memory
 *	for reply pool.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	/* reply pool, 4 byte align */
	ioc->reply_dma_pool = dma_pool_create("reply pool",
	    &ioc->pdev->dev, sz, 4, 0);
	if (!ioc->reply_dma_pool)
		return -ENOMEM;
	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
	    &ioc->reply_dma);
	if (!ioc->reply)
		return -EAGAIN;
	if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
		dinitprintk(ioc, pr_err(
		    "Bad Reply Pool! Reply (0x%p) Reply dma = (0x%llx)\n",
		    ioc->reply, (unsigned long long) ioc->reply_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
	ioc_info(ioc,
	    "reply pool(0x%p) - dma(0x%llx): depth(%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->reply, (unsigned long long)ioc->reply_dma,
	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
	return 0;
}
/**
 * _base_allocate_reply_free_dma_pool - Allocating DMA'able memory
 *	for reply free dma pool.
 * @ioc: Adapter object
 * @sz: DMA Pool size
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
{
	/* reply free queue, 16 byte align */
	ioc->reply_free_dma_pool = dma_pool_create(
	    "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_free_dma_pool)
		return -ENOMEM;
	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
	    GFP_KERNEL, &ioc->reply_free_dma);
	if (!ioc->reply_free)
		return -EAGAIN;
	if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
		dinitprintk(ioc,
		    pr_err("Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
		    ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	memset(ioc->reply_free, 0, sz);
	dinitprintk(ioc, ioc_info(ioc,
	    "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
	dinitprintk(ioc, ioc_info(ioc,
	    "reply_free_dma (0x%llx)\n",
	    (unsigned long long)ioc->reply_free_dma));
	return 0;
}
/**
 * _base_allocate_reply_post_free_array - Allocating DMA'able memory
 *	for reply post free array.
 * @ioc: Adapter object
 * @reply_post_free_array_sz: DMA Pool size
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
	u32 reply_post_free_array_sz)
{
	ioc->reply_post_free_array_dma_pool =
	    dma_pool_create("reply_post_free_array pool",
	    &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
	if (!ioc->reply_post_free_array_dma_pool)
		return -ENOMEM;
	ioc->reply_post_free_array =
	    dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
	    GFP_KERNEL, &ioc->reply_post_free_array_dma);
	if (!ioc->reply_post_free_array)
		return -EAGAIN;
	if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
	    reply_post_free_array_sz)) {
		dinitprintk(ioc, pr_err(
		    "Bad Reply Free Pool! Reply Free (0x%p) Reply Free dma = (0x%llx)\n",
		    ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
		ioc->use_32bit_dma = true;
		return -EAGAIN;
	}
	return 0;
}
/**
 * base_alloc_rdpq_dma_pool - Allocating DMA'able memory
 *			for reply queues.
 * @ioc: per adapter object
 * @sz: DMA Pool size
 * Return: 0 for success, non-zero for failure.
 */
static int
base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
{
	int i = 0;
	u32 dma_alloc_count = 0;
	int reply_post_free_sz = ioc->reply_post_queue_depth *
	    sizeof(Mpi2DefaultReplyDescriptor_t);
	int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;

	ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
	    GFP_KERNEL);
	if (!ioc->reply_post)
		return -ENOMEM;
	/*
	 * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and
	 * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should
	 * be within 4GB boundary i.e reply queues in a set must have the same
	 * upper 32-bits in their memory address. So here the driver allocates
	 * the DMA'able memory for reply queues accordingly.
	 * Driver uses limitation of
	 * VENTURA_SERIES to manage INVADER_SERIES as well.
	 */
	dma_alloc_count = DIV_ROUND_UP(count,
	    RDPQ_MAX_INDEX_IN_ONE_CHUNK);
	ioc->reply_post_free_dma_pool =
	    dma_pool_create("reply_post_free pool",
	    &ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_post_free_dma_pool)
		return -ENOMEM;
	for (i = 0; i < count; i++) {
		if ((i % RDPQ_MAX_INDEX_IN_ONE_CHUNK == 0) && dma_alloc_count) {
			ioc->reply_post[i].reply_post_free =
			    dma_pool_zalloc(ioc->reply_post_free_dma_pool,
				GFP_KERNEL,
				&ioc->reply_post[i].reply_post_free_dma);
			if (!ioc->reply_post[i].reply_post_free)
				return -ENOMEM;
			/*
			 * Each set of RDPQ pool must satisfy 4gb boundary
			 * restriction.
			 * 1) Check if allocated resources for RDPQ pool are in
			 *	the same 4GB range.
			 * 2) If #1 is true, continue with 64 bit DMA.
			 * 3) If #1 is false, return 1. which means free all the
			 * resources and set DMA mask to 32 and allocate.
			 */
			if (!mpt3sas_check_same_4gb_region(
				ioc->reply_post[i].reply_post_free_dma, sz)) {
				ioc_err(ioc, "bad Replypost free pool(0x%p)"
				    "reply_post_free_dma = (0x%llx)\n",
				    ioc->reply_post[i].reply_post_free,
				    (unsigned long long)
				    ioc->reply_post[i].reply_post_free_dma);
				return -EAGAIN;
			}
			dma_alloc_count--;
		} else {
			ioc->reply_post[i].reply_post_free =
			    (Mpi2ReplyDescriptorsUnion_t *)
			    ((long)ioc->reply_post[i-1].reply_post_free
			    + reply_post_free_sz);
			ioc->reply_post[i].reply_post_free_dma =
			    (ioc->reply_post[i-1].reply_post_free_dma +
			    reply_post_free_sz);
		}
	}
	return 0;
}
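/*
 * Worked example (illustrative, not part of the original source): with
 * rdpq_array_enable set, reply_queue_count = 24 and
 * RDPQ_MAX_INDEX_IN_ONE_CHUNK = 16, the loop above performs
 * DIV_ROUND_UP(24, 16) = 2 dma_pool_zalloc() calls; queues 0-15 are
 * carved out of the first chunk at reply_post_free_sz offsets and
 * queues 16-23 out of the second, so each set of 16 queues shares the
 * same upper 32 address bits.
 */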
/**
 * _base_allocate_memory_pools - allocate start of day memory pools
 * @ioc: per adapter object
 *
 * Return: 0 success, anything else error.
 */
static int
_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
{
	struct mpt3sas_facts *facts;
	u16 max_sge_elements;
	u16 chains_needed_per_io;
	u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
	u32 retry_sz;
	u32 rdpq_sz = 0, sense_sz = 0;
	u16 max_request_credit, nvme_blocks_needed;
	unsigned short sg_tablesize;
	u16 sge_size;
	int i;
	int ret = 0, rc = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	retry_sz = 0;
	facts = &ioc->facts;

	/* command line tunables for max sgl entries */
	if (max_sgl_entries != -1)
		sg_tablesize = max_sgl_entries;
	else {
		if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
			sg_tablesize = MPT2SAS_SG_DEPTH;
		else
			sg_tablesize = MPT3SAS_SG_DEPTH;
	}

	/* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
	if (reset_devices)
		sg_tablesize = min_t(unsigned short, sg_tablesize,
		    MPT_KDUMP_MIN_PHYS_SEGMENTS);

	if (ioc->is_mcpu_endpoint)
		ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
	else {
		if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
			sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
		else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
			sg_tablesize = min_t(unsigned short, sg_tablesize,
			    SG_MAX_SEGMENTS);
			ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
			    sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
		}
		ioc->shost->sg_tablesize = sg_tablesize;
	}
	ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
	    (facts->RequestCredit / 4));
	if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
		if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
		    INTERNAL_SCSIIO_CMDS_COUNT)) {
			ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
			    facts->RequestCredit);
			return -ENOMEM;
		}
		ioc->internal_depth = 10;
	}

	ioc->hi_priority_depth = ioc->internal_depth - (5);
	/* command line tunables for max controller queue depth */
	if (max_queue_depth != -1 && max_queue_depth != 0) {
		max_request_credit = min_t(u16, max_queue_depth +
		    ioc->internal_depth, facts->RequestCredit);
		if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
			max_request_credit = MAX_HBA_QUEUE_DEPTH;
	} else if (reset_devices)
		max_request_credit = min_t(u16, facts->RequestCredit,
		    (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
	else
		max_request_credit = min_t(u16, facts->RequestCredit,
		    MAX_HBA_QUEUE_DEPTH);

	/* Firmware maintains additional facts->HighPriorityCredit number of
	 * credits for HiPriority Request messages, so hba queue depth will be
	 * sum of max_request_credit and high priority queue depth.
	 */
	ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
	/* request frame size */
	ioc->request_sz = facts->IOCRequestFrameSize * 4;

	/* reply frame size */
	ioc->reply_sz = facts->ReplyFrameSize * 4;

	/* chain segment size */
	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		if (facts->IOCMaxChainSegmentSize)
			ioc->chain_segment_sz =
			    facts->IOCMaxChainSegmentSize *
			    MAX_CHAIN_ELEMT_SZ;
		else
		/* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
			ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
			    MAX_CHAIN_ELEMT_SZ;
	} else
		ioc->chain_segment_sz = ioc->request_sz;

	/* calculate the max scatter element size */
	sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);

 retry_allocation:
	total_sz = 0;
	/* calculate number of sg elements left over in the 1st frame */
	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
	    sizeof(Mpi2SGEIOUnion_t)) + sge_size);
	ioc->max_sges_in_main_message = max_sge_elements/sge_size;

	/* now do the same for a chain buffer */
	max_sge_elements = ioc->chain_segment_sz - sge_size;
	ioc->max_sges_in_chain_message = max_sge_elements/sge_size;

	/*
	 * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
	 */
	chains_needed_per_io = ((ioc->shost->sg_tablesize -
	    ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
	    + 1;
	if (chains_needed_per_io > facts->MaxChainDepth) {
		chains_needed_per_io = facts->MaxChainDepth;
		ioc->shost->sg_tablesize = min_t(u16,
		    ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
		    * chains_needed_per_io), ioc->shost->sg_tablesize);
	}
	ioc->chains_needed_per_io = chains_needed_per_io;
	/* reply free queue sizing - taking into account for 64 FW events */
	ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;

	/* mCPU manage single counters for simplicity */
	if (ioc->is_mcpu_endpoint)
		ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
	else {
		/* calculate reply descriptor post queue depth */
		ioc->reply_post_queue_depth = ioc->hba_queue_depth +
		    ioc->reply_free_queue_depth + 1;
		/* align the reply post queue on the next 16 count boundary */
		if (ioc->reply_post_queue_depth % 16)
			ioc->reply_post_queue_depth += 16 -
			    (ioc->reply_post_queue_depth % 16);
	}

	if (ioc->reply_post_queue_depth >
	    facts->MaxReplyDescriptorPostQueueDepth) {
		ioc->reply_post_queue_depth =
		    facts->MaxReplyDescriptorPostQueueDepth -
		    (facts->MaxReplyDescriptorPostQueueDepth % 16);
		ioc->hba_queue_depth =
		    ((ioc->reply_post_queue_depth - 64) / 2) - 1;
		ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
	}

	ioc_info(ioc,
	    "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), "
	    "sge_per_io(%d), chains_per_io(%d)\n",
	    ioc->max_sges_in_main_message,
	    ioc->max_sges_in_chain_message,
	    ioc->shost->sg_tablesize,
	    ioc->chains_needed_per_io);
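/*
 * Worked example (illustrative numbers, not from the original source):
 * with hba_queue_depth = 1000 the reply free queue depth becomes
 * 1000 + 64 = 1064 and the reply descriptor post queue depth becomes
 * 1000 + 1064 + 1 = 2065, which the rounding above raises to the next
 * multiple of 16, i.e. 2065 + (16 - 2065 % 16) = 2080, before it is
 * checked against facts->MaxReplyDescriptorPostQueueDepth.
 */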
	/* reply post queue, 16 byte align */
	reply_post_free_sz = ioc->reply_post_queue_depth *
	    sizeof(Mpi2DefaultReplyDescriptor_t);
	rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
	if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
	    || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
		rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
	ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
	if (ret == -EAGAIN) {
		/*
		 * Free allocated bad RDPQ memory pools.
		 * Change dma coherent mask to 32 bit and reallocate RDPQ
		 */
		_base_release_memory_pools(ioc);
		ioc->use_32bit_dma = true;
		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
			ioc_err(ioc,
			    "32 DMA mask failed %s\n", pci_name(ioc->pdev));
			return -ENODEV;
		}
		if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
			return -ENOMEM;
	} else if (ret == -ENOMEM)
		return -ENOMEM;
	total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
	    DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
	ioc->scsiio_depth = ioc->hba_queue_depth -
	    ioc->hi_priority_depth - ioc->internal_depth;

	/* set the scsi host can_queue depth
	 * with some internal commands that could be outstanding
	 */
	ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
	dinitprintk(ioc,
	    ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
	    ioc->shost->can_queue));
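/*
 * Illustrative example (not from the original source): if
 * hba_queue_depth = 1000, hi_priority_depth = 5 and internal_depth = 10,
 * then scsiio_depth = 1000 - 5 - 10 = 985 and the SCSI midlayer
 * can_queue value is 985 - INTERNAL_SCSIIO_CMDS_COUNT, reserving a few
 * frames for the driver's own internal SCSI IO commands.
 */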
	/* contiguous pool for request and chains, 16 byte align, one extra "
	 * "frame for smid=0
	 */
	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
	sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);

	/* hi-priority queue */
	sz += (ioc->hi_priority_depth * ioc->request_sz);

	/* internal queue */
	sz += (ioc->internal_depth * ioc->request_sz);

	ioc->request_dma_sz = sz;
	ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
	    &ioc->request_dma, GFP_KERNEL);
	if (!ioc->request) {
		ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
		    ioc->hba_queue_depth, ioc->chains_needed_per_io,
		    ioc->request_sz, sz / 1024);
		if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
			goto out;
		retry_sz = 64;
		ioc->hba_queue_depth -= retry_sz;
		_base_release_memory_pools(ioc);
		goto retry_allocation;
	}

	if (retry_sz)
		ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
		    ioc->hba_queue_depth, ioc->chains_needed_per_io,
		    ioc->request_sz, sz / 1024);

	/* hi-priority queue */
	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);
	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
	    ioc->request_sz);

	/* internal queue */
	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
	    ioc->request_sz);
	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
	    ioc->request_sz);

	ioc_info(ioc,
	    "request pool(0x%p) - dma(0x%llx): "
	    "depth(%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->request, (unsigned long long) ioc->request_dma,
	    ioc->hba_queue_depth, ioc->request_sz,
	    (ioc->hba_queue_depth * ioc->request_sz) / 1024);

	total_sz += sz;

	dinitprintk(ioc,
	    ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
	    ioc->request, ioc->scsiio_depth));
	ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
	sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
	ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
	if (!ioc->chain_lookup) {
		ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
		goto out;
	}

	sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
	for (i = 0; i < ioc->scsiio_depth; i++) {
		ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
		if (!ioc->chain_lookup[i].chains_per_smid) {
			ioc_err(ioc, "chain_lookup: kzalloc failed\n");
			goto out;
		}
	}

	/* initialize hi-priority queue smid's */
	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->hpr_lookup) {
		ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
		goto out;
	}
	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
	dinitprintk(ioc,
	    ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
	    ioc->hi_priority,
	    ioc->hi_priority_depth, ioc->hi_priority_smid));

	/* initialize internal queue smid's */
	ioc->internal_lookup = kcalloc(ioc->internal_depth,
	    sizeof(struct request_tracker), GFP_KERNEL);
	if (!ioc->internal_lookup) {
		ioc_err(ioc, "internal_lookup: kcalloc failed\n");
		goto out;
	}
	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
	dinitprintk(ioc,
	    ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
	    ioc->internal,
	    ioc->internal_depth, ioc->internal_smid));

	ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
	    sizeof(u16), GFP_KERNEL);
	if (!ioc->io_queue_num)
		goto out;
	/*
	 * The number of NVMe page sized blocks needed is:
	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
	 * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
	 * that is placed in the main message frame. 8 is the size of each PRP
	 * entry or PRP list pointer entry. 8 is subtracted from page_size
	 * because of the PRP list pointer entry at the end of a page, so this
	 * is not counted as a PRP entry. The 1 added page is a round up.
	 *
	 * To avoid allocation failures due to the amount of memory that could
	 * be required for NVMe PRP's, only each set of NVMe blocks will be
	 * contiguous, so a new set is allocated for each possible I/O.
	 */

	ioc->chains_per_prp_buffer = 0;
	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
		nvme_blocks_needed =
		    (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
		nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
		nvme_blocks_needed++;

		sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
		ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
		if (!ioc->pcie_sg_lookup) {
			ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
			goto out;
		}
		sz = nvme_blocks_needed * ioc->page_size;
		rc = _base_allocate_pcie_sgl_pool(ioc, sz);
		if (rc == -ENOMEM)
			return -ENOMEM;
		else if (rc == -EAGAIN)
			goto try_32bit_dma;
		total_sz += sz * ioc->scsiio_depth;
	}

	rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
	    ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
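/*
 * Worked example (illustrative, not from the original source): with a
 * 4 KB controller page size, NVME_PRP_SIZE = 8 and sg_tablesize = 128,
 * nvme_blocks_needed = ((128 * 8) - 1) / (4096 - 8) + 1
 *                    = 1023 / 4088 + 1 = 1,
 * so a single page-sized PRP block per outstanding I/O is reserved in
 * the PCIe SGL pool.
 */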
	dinitprintk(ioc,
	    ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
	    ioc->chain_depth, ioc->chain_segment_sz,
	    (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
	/* sense buffers, 4 byte align */
	sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
	rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	total_sz += sense_sz;
	/* reply pool, 4 byte align */
	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
	rc = _base_allocate_reply_pool(ioc, sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	total_sz += sz;

	/* reply free queue, 16 byte align */
	sz = ioc->reply_free_queue_depth * 4;
	rc = _base_allocate_reply_free_dma_pool(ioc, sz);
	if (rc == -ENOMEM)
		return -ENOMEM;
	else if (rc == -EAGAIN)
		goto try_32bit_dma;
	dinitprintk(ioc,
	    ioc_info(ioc, "reply_free_dma (0x%llx)\n",
	    (unsigned long long)ioc->reply_free_dma));
	total_sz += sz;
	if (ioc->rdpq_array_enable) {
		reply_post_free_array_sz = ioc->reply_queue_count *
		    sizeof(Mpi2IOCInitRDPQArrayEntry);
		rc = _base_allocate_reply_post_free_array(ioc,
		    reply_post_free_array_sz);
		if (rc == -ENOMEM)
			return -ENOMEM;
		else if (rc == -EAGAIN)
			goto try_32bit_dma;
	}
	ioc->config_page_sz = 512;
	ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
	    ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
	if (!ioc->config_page) {
		ioc_err(ioc, "config page: dma_pool_alloc failed\n");
		goto out;
	}

	ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
	    ioc->config_page, (unsigned long long)ioc->config_page_dma,
	    ioc->config_page_sz);
	total_sz += ioc->config_page_sz;

	ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
	    total_sz / 1024);
	ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
	    ioc->shost->can_queue, facts->RequestCredit);
	ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
	    ioc->shost->sg_tablesize);
	return 0;

try_32bit_dma:
	_base_release_memory_pools(ioc);
	if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
		/* Change dma coherent mask to 32 bit and reallocate */
		if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
			pr_err("Setting 32 bit coherent DMA mask Failed %s\n",
			    pci_name(ioc->pdev));
			return -ENODEV;
		}
	} else if (_base_reduce_hba_queue_depth(ioc) != 0)
		return -ENOMEM;
	goto retry_allocation;

 out:
	return -ENOMEM;
}
/**
 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
 * @ioc: Pointer to MPT_ADAPTER structure
 * @cooked: Request raw or cooked IOC state
 *
 * Return: all IOC Doorbell register bits if cooked==0, else just the
 * Doorbell bits in MPI_IOC_STATE_MASK.
 */
u32
mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
{
	u32 s, sc;

	s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
	sc = s & MPI2_IOC_STATE_MASK;
	return cooked ? sc : s;
}
/**
 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
 * @timeout: timeout in second
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
{
	u32 count, cntdn;
	u32 current_state;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		current_state = mpt3sas_base_get_iocstate(ioc, 1);
		if (current_state == ioc_state)
			return 0;
		if (count && current_state == MPI2_IOC_STATE_FAULT)
			break;
		if (count && current_state == MPI2_IOC_STATE_COREDUMP)
			break;

		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);

	return current_state;
}
/**
 * _base_dump_reg_set - This function will print hexdump of register set.
 * @ioc: per adapter object
 *
 * Return: nothing.
 */
static inline void
_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int i, sz = 256;
	u32 __iomem *reg = (u32 __iomem *)ioc->chip;

	ioc_info(ioc, "System Register set:\n");
	for (i = 0; i < (sz / sizeof(u32)); i++)
		pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
}
/**
 * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
 * a write to the doorbell)
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
 */
static int
_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc,
			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
			    __func__, count, timeout));
			return 0;
		}

		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);

	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
	    __func__, count, int_status);
	return -EFAULT;
}
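/*
 * Note (illustrative, not from the original source): the doorbell wait
 * helpers in this file share the same polling skeleton, roughly:
 *
 *	cntdn = 1000 * timeout;			// about 1 ms per iteration
 *	do {
 *		status = read the register of interest;
 *		if (wait condition is met)
 *			return 0;
 *		usleep_range(1000, 1500);
 *		count++;
 *	} while (--cntdn);
 *	return -EFAULT;				// timed out
 *
 * _base_spin_on_doorbell_int() below follows the same pattern with a
 * shorter per-iteration delay and a larger iteration count (2000 *
 * timeout) for the handshake fast path.
 */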
static int
_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = 2000 * timeout;
	do {
		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc,
			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
			    __func__, count, timeout));
			return 0;
		}

		udelay(500);
		count++;
	} while (--cntdn);

	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
	    __func__, count, int_status);
	return -EFAULT;
}
/**
 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
 * @ioc: per adapter object
 * @timeout: timeout in second
 *
 * Return: 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
 * doorbell.
 */
static int
_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			dhsprintk(ioc,
			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
			    __func__, count, timeout));
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_print_fault_code(ioc, doorbell);
				return -EFAULT;
			}
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_COREDUMP) {
				mpt3sas_print_coredump_info(ioc, doorbell);
				return -EFAULT;
			}
		} else if (int_status == 0xFFFFFFFF)
			goto out;

		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);

 out:
	ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
	    __func__, count, int_status);
	return -EFAULT;
}
/**
 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
 * @ioc: per adapter object
 * @timeout: timeout in second
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 doorbell_reg;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
			dhsprintk(ioc,
			    ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
			    __func__, count, timeout));
			return 0;
		}

		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);

	ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
	    __func__, count, doorbell_reg);
	return -EFAULT;
}
/**
 * _base_send_ioc_reset - send doorbell reset
 * @ioc: per adapter object
 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in second
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
{
	u32 ioc_state;
	int r = 0;
	unsigned long flags;

	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
		ioc_err(ioc, "%s: unknown reset_type\n", __func__);
		return -EFAULT;
	}

	if (!(ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		return -EFAULT;

	ioc_info(ioc, "sending message unit reset !!\n");

	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
	    &ioc->chip->Doorbell);
	if ((_base_wait_for_doorbell_ack(ioc, 15))) {
		r = -EFAULT;
		goto out;
	}

	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
	if (ioc_state) {
		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
		    __func__, ioc_state);
		r = -EFAULT;
		goto out;
	}
 out:
	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	/*
	 * Wait for IOC state CoreDump to clear only during
	 * HBA initialization & release time.
	 */
	if ((ioc_state & MPI2_IOC_STATE_MASK) ==
	    MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
	    ioc->fault_reset_work_q == NULL)) {
		spin_unlock_irqrestore(
		    &ioc->ioc_reset_in_progress_lock, flags);
		mpt3sas_print_coredump_info(ioc, ioc_state);
		mpt3sas_base_wait_for_coredump_completion(ioc,
		    __func__);
		spin_lock_irqsave(
		    &ioc->ioc_reset_in_progress_lock, flags);
	}
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc_info(ioc, "message unit reset: %s\n",
	    r == 0 ? "SUCCESS" : "FAILED");
	return r;
}
/**
 * mpt3sas_wait_for_ioc - IOC's operational state is checked here.
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Return: Waits up to timeout seconds for the IOC to
 * become operational. Returns 0 if IOC is present
 * and operational; otherwise returns %-EFAULT.
 */
int
mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	int wait_state_count = 0;
	u32 ioc_state;

	do {
		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
		if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
			break;

		/*
		 * Watchdog thread will be started after IOC Initialization, so
		 * no need to wait here for IOC state to become operational
		 * when IOC Initialization is on. Instead the driver will
		 * return ETIME status, so that calling function can issue
		 * diag reset operation and retry the command.
		 */
		if (ioc->is_driver_loading)
			return -ETIME;

		ssleep(1);
		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
		    __func__, ++wait_state_count);
	} while (--timeout);
	if (!timeout) {
		ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
		return -EFAULT;
	}
	if (wait_state_count)
		ioc_info(ioc, "ioc is operational\n");
	return 0;
}
7026 * _base_handshake_req_reply_wait - send request thru doorbell interface
7027 * @ioc: per adapter object
7028 * @request_bytes: request length
7029 * @request: pointer having request payload
7030 * @reply_bytes: reply length
7031 * @reply: pointer to reply payload
7032 * @timeout: timeout in second
7034 * Return: 0 for success, non-zero for failure.
7037 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER
*ioc
, int request_bytes
,
7038 u32
*request
, int reply_bytes
, u16
*reply
, int timeout
)
7040 MPI2DefaultReply_t
*default_reply
= (MPI2DefaultReply_t
*)reply
;
7045 /* make sure doorbell is not in use */
7046 if ((ioc
->base_readl_ext_retry(&ioc
->chip
->Doorbell
) & MPI2_DOORBELL_USED
)) {
7047 ioc_err(ioc
, "doorbell is in use (line=%d)\n", __LINE__
);
7051 /* clear pending doorbell interrupts from previous state changes */
7052 if (ioc
->base_readl(&ioc
->chip
->HostInterruptStatus
) &
7053 MPI2_HIS_IOC2SYS_DB_STATUS
)
7054 writel(0, &ioc
->chip
->HostInterruptStatus
);
7056 /* send message to ioc */
7057 writel(((MPI2_FUNCTION_HANDSHAKE
<<MPI2_DOORBELL_FUNCTION_SHIFT
) |
7058 ((request_bytes
/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT
)),
7059 &ioc
->chip
->Doorbell
);
7061 if ((_base_spin_on_doorbell_int(ioc
, 5))) {
7062 ioc_err(ioc
, "doorbell handshake int failed (line=%d)\n",
7066 writel(0, &ioc
->chip
->HostInterruptStatus
);
7068 if ((_base_wait_for_doorbell_ack(ioc
, 5))) {
7069 ioc_err(ioc
, "doorbell handshake ack failed (line=%d)\n",
7074 /* send message 32-bits at a time */
7075 for (i
= 0, failed
= 0; i
< request_bytes
/4 && !failed
; i
++) {
7076 writel(cpu_to_le32(request
[i
]), &ioc
->chip
->Doorbell
);
7077 if ((_base_wait_for_doorbell_ack(ioc
, 5)))
7082 ioc_err(ioc
, "doorbell handshake sending request failed (line=%d)\n",
7087 /* now wait for the reply */
7088 if ((_base_wait_for_doorbell_int(ioc
, timeout
))) {
7089 ioc_err(ioc
, "doorbell handshake int failed (line=%d)\n",
7094 /* read the first two 16-bits, it gives the total length of the reply */
7095 reply
[0] = le16_to_cpu(ioc
->base_readl_ext_retry(&ioc
->chip
->Doorbell
)
7096 & MPI2_DOORBELL_DATA_MASK
);
7097 writel(0, &ioc
->chip
->HostInterruptStatus
);
7098 if ((_base_wait_for_doorbell_int(ioc
, 5))) {
7099 ioc_err(ioc
, "doorbell handshake int failed (line=%d)\n",
7103 reply
[1] = le16_to_cpu(ioc
->base_readl_ext_retry(&ioc
->chip
->Doorbell
)
7104 & MPI2_DOORBELL_DATA_MASK
);
7105 writel(0, &ioc
->chip
->HostInterruptStatus
);
7107 for (i
= 2; i
< default_reply
->MsgLength
* 2; i
++) {
7108 if ((_base_wait_for_doorbell_int(ioc
, 5))) {
7109 ioc_err(ioc
, "doorbell handshake int failed (line=%d)\n",
7113 if (i
>= reply_bytes
/2) /* overflow case */
7114 ioc
->base_readl_ext_retry(&ioc
->chip
->Doorbell
);
7116 reply
[i
] = le16_to_cpu(
7117 ioc
->base_readl_ext_retry(&ioc
->chip
->Doorbell
)
7118 & MPI2_DOORBELL_DATA_MASK
);
7119 writel(0, &ioc
->chip
->HostInterruptStatus
);
7122 _base_wait_for_doorbell_int(ioc
, 5);
7123 if (_base_wait_for_doorbell_not_used(ioc
, 5) != 0) {
7125 ioc_info(ioc
, "doorbell is in use (line=%d)\n",
7128 writel(0, &ioc
->chip
->HostInterruptStatus
);
7130 if (ioc
->logging_level
& MPT_DEBUG_INIT
) {
7131 mfp
= (__le32
*)reply
;
7132 pr_info("\toffset:data\n");
7133 for (i
= 0; i
< reply_bytes
/4; i
++)
7134 ioc_info(ioc
, "\t[0x%02x]:%08x\n", i
*4,
7135 le32_to_cpu(mfp
[i
]));
7141 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
7142 * @ioc: per adapter object
7143 * @mpi_reply: the reply payload from FW
7144 * @mpi_request: the request payload sent to FW
7146 * The SAS IO Unit Control Request message allows the host to perform low-level
7147 * operations, such as resets on the PHYs of the IO Unit, also allows the host
7148 * to obtain the IOC assigned device handles for a device if it has other
7149 * identifying information about the device, in addition allows the host to
7150 * remove IOC resources associated with the device.
7152 * Return: 0 for success, non-zero for failure.
7155 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER
*ioc
,
7156 Mpi2SasIoUnitControlReply_t
*mpi_reply
,
7157 Mpi2SasIoUnitControlRequest_t
*mpi_request
)
7164 dinitprintk(ioc
, ioc_info(ioc
, "%s\n", __func__
));
7166 mutex_lock(&ioc
->base_cmds
.mutex
);
7168 if (ioc
->base_cmds
.status
!= MPT3_CMD_NOT_USED
) {
7169 ioc_err(ioc
, "%s: base_cmd in use\n", __func__
);
7174 rc
= mpt3sas_wait_for_ioc(ioc
, IOC_OPERATIONAL_WAIT_COUNT
);
7178 smid
= mpt3sas_base_get_smid(ioc
, ioc
->base_cb_idx
);
7180 ioc_err(ioc
, "%s: failed obtaining a smid\n", __func__
);
7186 ioc
->base_cmds
.status
= MPT3_CMD_PENDING
;
7187 request
= mpt3sas_base_get_msg_frame(ioc
, smid
);
7188 ioc
->base_cmds
.smid
= smid
;
7189 memcpy(request
, mpi_request
, sizeof(Mpi2SasIoUnitControlRequest_t
));
7190 if (mpi_request
->Operation
== MPI2_SAS_OP_PHY_HARD_RESET
||
7191 mpi_request
->Operation
== MPI2_SAS_OP_PHY_LINK_RESET
)
7192 ioc
->ioc_link_reset_in_progress
= 1;
7193 init_completion(&ioc
->base_cmds
.done
);
7194 ioc
->put_smid_default(ioc
, smid
);
7195 wait_for_completion_timeout(&ioc
->base_cmds
.done
,
7196 msecs_to_jiffies(10000));
7197 if ((mpi_request
->Operation
== MPI2_SAS_OP_PHY_HARD_RESET
||
7198 mpi_request
->Operation
== MPI2_SAS_OP_PHY_LINK_RESET
) &&
7199 ioc
->ioc_link_reset_in_progress
)
7200 ioc
->ioc_link_reset_in_progress
= 0;
7201 if (!(ioc
->base_cmds
.status
& MPT3_CMD_COMPLETE
)) {
7202 mpt3sas_check_cmd_timeout(ioc
, ioc
->base_cmds
.status
,
7203 mpi_request
, sizeof(Mpi2SasIoUnitControlRequest_t
)/4,
7205 goto issue_host_reset
;
7207 if (ioc
->base_cmds
.status
& MPT3_CMD_REPLY_VALID
)
7208 memcpy(mpi_reply
, ioc
->base_cmds
.reply
,
7209 sizeof(Mpi2SasIoUnitControlReply_t
));
7211 memset(mpi_reply
, 0, sizeof(Mpi2SasIoUnitControlReply_t
));
7212 ioc
->base_cmds
.status
= MPT3_CMD_NOT_USED
;
7217 mpt3sas_base_hard_reset_handler(ioc
, FORCE_BIG_HAMMER
);
7218 ioc
->base_cmds
.status
= MPT3_CMD_NOT_USED
;
7221 mutex_unlock(&ioc
->base_cmds
.mutex
);
7226 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
7227 * @ioc: per adapter object
7228 * @mpi_reply: the reply payload from FW
7229 * @mpi_request: the request payload sent to FW
7231 * The SCSI Enclosure Processor request message causes the IOC to
7232 * communicate with SES devices to control LED status signals.
7234 * Return: 0 for success, non-zero for failure.
7237 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER
*ioc
,
7238 Mpi2SepReply_t
*mpi_reply
, Mpi2SepRequest_t
*mpi_request
)
7245 dinitprintk(ioc
, ioc_info(ioc
, "%s\n", __func__
));
7247 mutex_lock(&ioc
->base_cmds
.mutex
);
7249 if (ioc
->base_cmds
.status
!= MPT3_CMD_NOT_USED
) {
7250 ioc_err(ioc
, "%s: base_cmd in use\n", __func__
);
7255 rc
= mpt3sas_wait_for_ioc(ioc
, IOC_OPERATIONAL_WAIT_COUNT
);
7259 smid
= mpt3sas_base_get_smid(ioc
, ioc
->base_cb_idx
);
7261 ioc_err(ioc
, "%s: failed obtaining a smid\n", __func__
);
7267 ioc
->base_cmds
.status
= MPT3_CMD_PENDING
;
7268 request
= mpt3sas_base_get_msg_frame(ioc
, smid
);
7269 ioc
->base_cmds
.smid
= smid
;
7270 memset(request
, 0, ioc
->request_sz
);
7271 memcpy(request
, mpi_request
, sizeof(Mpi2SepReply_t
));
7272 init_completion(&ioc
->base_cmds
.done
);
7273 ioc
->put_smid_default(ioc
, smid
);
7274 wait_for_completion_timeout(&ioc
->base_cmds
.done
,
7275 msecs_to_jiffies(10000));
7276 if (!(ioc
->base_cmds
.status
& MPT3_CMD_COMPLETE
)) {
7277 mpt3sas_check_cmd_timeout(ioc
,
7278 ioc
->base_cmds
.status
, mpi_request
,
7279 sizeof(Mpi2SepRequest_t
)/4, issue_reset
);
7280 goto issue_host_reset
;
7282 if (ioc
->base_cmds
.status
& MPT3_CMD_REPLY_VALID
)
7283 memcpy(mpi_reply
, ioc
->base_cmds
.reply
,
7284 sizeof(Mpi2SepReply_t
));
7286 memset(mpi_reply
, 0, sizeof(Mpi2SepReply_t
));
7287 ioc
->base_cmds
.status
= MPT3_CMD_NOT_USED
;
7292 mpt3sas_base_hard_reset_handler(ioc
, FORCE_BIG_HAMMER
);
7293 ioc
->base_cmds
.status
= MPT3_CMD_NOT_USED
;
7296 mutex_unlock(&ioc
->base_cmds
.mutex
);
7301 * _base_get_port_facts - obtain port facts reply and save in ioc
7302 * @ioc: per adapter object
7305 * Return: 0 for success, non-zero for failure.
7308 _base_get_port_facts(struct MPT3SAS_ADAPTER
*ioc
, int port
)
7310 Mpi2PortFactsRequest_t mpi_request
;
7311 Mpi2PortFactsReply_t mpi_reply
;
7312 struct mpt3sas_port_facts
*pfacts
;
7313 int mpi_reply_sz
, mpi_request_sz
, r
;
7315 dinitprintk(ioc
, ioc_info(ioc
, "%s\n", __func__
));
7317 mpi_reply_sz
= sizeof(Mpi2PortFactsReply_t
);
7318 mpi_request_sz
= sizeof(Mpi2PortFactsRequest_t
);
7319 memset(&mpi_request
, 0, mpi_request_sz
);
7320 mpi_request
.Function
= MPI2_FUNCTION_PORT_FACTS
;
7321 mpi_request
.PortNumber
= port
;
7322 r
= _base_handshake_req_reply_wait(ioc
, mpi_request_sz
,
7323 (u32
*)&mpi_request
, mpi_reply_sz
, (u16
*)&mpi_reply
, 5);
7326 ioc_err(ioc
, "%s: handshake failed (r=%d)\n", __func__
, r
);
7330 pfacts
= &ioc
->pfacts
[port
];
7331 memset(pfacts
, 0, sizeof(struct mpt3sas_port_facts
));
7332 pfacts
->PortNumber
= mpi_reply
.PortNumber
;
7333 pfacts
->VP_ID
= mpi_reply
.VP_ID
;
7334 pfacts
->VF_ID
= mpi_reply
.VF_ID
;
7335 pfacts
->MaxPostedCmdBuffers
=
7336 le16_to_cpu(mpi_reply
.MaxPostedCmdBuffers
);
7342 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
7343 * @ioc: per adapter object
7346 * Return: 0 for success, non-zero for failure.
7349 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER
*ioc
, int timeout
)
7354 dinitprintk(ioc
, ioc_info(ioc
, "%s\n", __func__
));
7356 if (ioc
->pci_error_recovery
) {
7358 ioc_info(ioc
, "%s: host in pci error recovery\n",
7363 ioc_state
= mpt3sas_base_get_iocstate(ioc
, 0);
7365 ioc_info(ioc
, "%s: ioc_state(0x%08x)\n",
7366 __func__
, ioc_state
));
7368 if (((ioc_state
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_READY
) ||
7369 (ioc_state
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_OPERATIONAL
)
7372 if (ioc_state
& MPI2_DOORBELL_USED
) {
7373 dhsprintk(ioc
, ioc_info(ioc
, "unexpected doorbell active!\n"));
7374 goto issue_diag_reset
;
7377 if ((ioc_state
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_FAULT
) {
7378 mpt3sas_print_fault_code(ioc
, ioc_state
&
7379 MPI2_DOORBELL_DATA_MASK
);
7380 goto issue_diag_reset
;
7381 } else if ((ioc_state
& MPI2_IOC_STATE_MASK
) ==
7382 MPI2_IOC_STATE_COREDUMP
) {
7384 "%s: Skipping the diag reset here. (ioc_state=0x%x)\n",
7385 __func__
, ioc_state
);
7389 ioc_state
= _base_wait_on_iocstate(ioc
, MPI2_IOC_STATE_READY
, timeout
);
7392 ioc_info(ioc
, "%s: failed going to ready state (ioc_state=0x%x)\n",
7393 __func__
, ioc_state
));
7400 rc
= _base_diag_reset(ioc
);
7405 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
7406 * @ioc: per adapter object
7408 * Return: 0 for success, non-zero for failure.
7411 _base_get_ioc_facts(struct MPT3SAS_ADAPTER
*ioc
)
7413 Mpi2IOCFactsRequest_t mpi_request
;
7414 Mpi2IOCFactsReply_t mpi_reply
;
7415 struct mpt3sas_facts
*facts
;
7416 int mpi_reply_sz
, mpi_request_sz
, r
;
7418 dinitprintk(ioc
, ioc_info(ioc
, "%s\n", __func__
));
7420 r
= _base_wait_for_iocstate(ioc
, 10);
7423 ioc_info(ioc
, "%s: failed getting to correct state\n",
7427 mpi_reply_sz
= sizeof(Mpi2IOCFactsReply_t
);
7428 mpi_request_sz
= sizeof(Mpi2IOCFactsRequest_t
);
7429 memset(&mpi_request
, 0, mpi_request_sz
);
7430 mpi_request
.Function
= MPI2_FUNCTION_IOC_FACTS
;
7431 r
= _base_handshake_req_reply_wait(ioc
, mpi_request_sz
,
7432 (u32
*)&mpi_request
, mpi_reply_sz
, (u16
*)&mpi_reply
, 5);
7435 ioc_err(ioc
, "%s: handshake failed (r=%d)\n", __func__
, r
);
7439 facts
= &ioc
->facts
;
7440 memset(facts
, 0, sizeof(struct mpt3sas_facts
));
7441 facts
->MsgVersion
= le16_to_cpu(mpi_reply
.MsgVersion
);
7442 facts
->HeaderVersion
= le16_to_cpu(mpi_reply
.HeaderVersion
);
7443 facts
->VP_ID
= mpi_reply
.VP_ID
;
7444 facts
->VF_ID
= mpi_reply
.VF_ID
;
7445 facts
->IOCExceptions
= le16_to_cpu(mpi_reply
.IOCExceptions
);
7446 facts
->MaxChainDepth
= mpi_reply
.MaxChainDepth
;
7447 facts
->WhoInit
= mpi_reply
.WhoInit
;
7448 facts
->NumberOfPorts
= mpi_reply
.NumberOfPorts
;
7449 facts
->MaxMSIxVectors
= mpi_reply
.MaxMSIxVectors
;
7450 if (ioc
->msix_enable
&& (facts
->MaxMSIxVectors
<=
7451 MAX_COMBINED_MSIX_VECTORS(ioc
->is_gen35_ioc
)))
7452 ioc
->combined_reply_queue
= 0;
7453 facts
->RequestCredit
= le16_to_cpu(mpi_reply
.RequestCredit
);
7454 facts
->MaxReplyDescriptorPostQueueDepth
=
7455 le16_to_cpu(mpi_reply
.MaxReplyDescriptorPostQueueDepth
);
7456 facts
->ProductID
= le16_to_cpu(mpi_reply
.ProductID
);
7457 facts
->IOCCapabilities
= le32_to_cpu(mpi_reply
.IOCCapabilities
);
7458 if ((facts
->IOCCapabilities
& MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID
))
7459 ioc
->ir_firmware
= 1;
7460 if ((facts
->IOCCapabilities
&
7461 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE
) && (!reset_devices
))
7462 ioc
->rdpq_array_capable
= 1;
7463 if ((facts
->IOCCapabilities
& MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ
)
7464 && ioc
->is_aero_ioc
)
7465 ioc
->atomic_desc_capable
= 1;
7466 facts
->FWVersion
.Word
= le32_to_cpu(mpi_reply
.FWVersion
.Word
);
7467 facts
->IOCRequestFrameSize
=
7468 le16_to_cpu(mpi_reply
.IOCRequestFrameSize
);
7469 if (ioc
->hba_mpi_version_belonged
!= MPI2_VERSION
) {
7470 facts
->IOCMaxChainSegmentSize
=
7471 le16_to_cpu(mpi_reply
.IOCMaxChainSegmentSize
);
7473 facts
->MaxInitiators
= le16_to_cpu(mpi_reply
.MaxInitiators
);
7474 facts
->MaxTargets
= le16_to_cpu(mpi_reply
.MaxTargets
);
7475 ioc
->shost
->max_id
= -1;
7476 facts
->MaxSasExpanders
= le16_to_cpu(mpi_reply
.MaxSasExpanders
);
7477 facts
->MaxEnclosures
= le16_to_cpu(mpi_reply
.MaxEnclosures
);
7478 facts
->ProtocolFlags
= le16_to_cpu(mpi_reply
.ProtocolFlags
);
7479 facts
->HighPriorityCredit
=
7480 le16_to_cpu(mpi_reply
.HighPriorityCredit
);
7481 facts
->ReplyFrameSize
= mpi_reply
.ReplyFrameSize
;
7482 facts
->MaxDevHandle
= le16_to_cpu(mpi_reply
.MaxDevHandle
);
7483 facts
->CurrentHostPageSize
= mpi_reply
.CurrentHostPageSize
;
7486 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
7488 ioc
->page_size
= 1 << facts
->CurrentHostPageSize
;
7489 if (ioc
->page_size
== 1) {
7490 ioc_info(ioc
, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
7491 ioc
->page_size
= 1 << MPT3SAS_HOST_PAGE_SIZE_4K
;
7494 ioc_info(ioc
, "CurrentHostPageSize(%d)\n",
7495 facts
->CurrentHostPageSize
));
7498 ioc_info(ioc
, "hba queue depth(%d), max chains per io(%d)\n",
7499 facts
->RequestCredit
, facts
->MaxChainDepth
));
7501 ioc_info(ioc
, "request frame size(%d), reply frame size(%d)\n",
7502 facts
->IOCRequestFrameSize
* 4,
7503 facts
->ReplyFrameSize
* 4));
7508 * _base_send_ioc_init - send ioc_init to firmware
7509 * @ioc: per adapter object
7511 * Return: 0 for success, non-zero for failure.
7514 _base_send_ioc_init(struct MPT3SAS_ADAPTER
*ioc
)
7516 Mpi2IOCInitRequest_t mpi_request
;
7517 Mpi2IOCInitReply_t mpi_reply
;
7519 ktime_t current_time
;
7521 u32 reply_post_free_array_sz
= 0;
7523 dinitprintk(ioc
, ioc_info(ioc
, "%s\n", __func__
));
7525 memset(&mpi_request
, 0, sizeof(Mpi2IOCInitRequest_t
));
7526 mpi_request
.Function
= MPI2_FUNCTION_IOC_INIT
;
7527 mpi_request
.WhoInit
= MPI2_WHOINIT_HOST_DRIVER
;
7528 mpi_request
.VF_ID
= 0; /* TODO */
7529 mpi_request
.VP_ID
= 0;
7530 mpi_request
.MsgVersion
= cpu_to_le16(ioc
->hba_mpi_version_belonged
);
7531 mpi_request
.HeaderVersion
= cpu_to_le16(MPI2_HEADER_VERSION
);
7532 mpi_request
.HostPageSize
= MPT3SAS_HOST_PAGE_SIZE_4K
;
7534 if (_base_is_controller_msix_enabled(ioc
))
7535 mpi_request
.HostMSIxVectors
= ioc
->reply_queue_count
;
7536 mpi_request
.SystemRequestFrameSize
= cpu_to_le16(ioc
->request_sz
/4);
7537 mpi_request
.ReplyDescriptorPostQueueDepth
=
7538 cpu_to_le16(ioc
->reply_post_queue_depth
);
7539 mpi_request
.ReplyFreeQueueDepth
=
7540 cpu_to_le16(ioc
->reply_free_queue_depth
);
7542 mpi_request
.SenseBufferAddressHigh
=
7543 cpu_to_le32((u64
)ioc
->sense_dma
>> 32);
7544 mpi_request
.SystemReplyAddressHigh
=
7545 cpu_to_le32((u64
)ioc
->reply_dma
>> 32);
7546 mpi_request
.SystemRequestFrameBaseAddress
=
7547 cpu_to_le64((u64
)ioc
->request_dma
);
7548 mpi_request
.ReplyFreeQueueAddress
=
7549 cpu_to_le64((u64
)ioc
->reply_free_dma
);
7551 if (ioc
->rdpq_array_enable
) {
7552 reply_post_free_array_sz
= ioc
->reply_queue_count
*
7553 sizeof(Mpi2IOCInitRDPQArrayEntry
);
7554 memset(ioc
->reply_post_free_array
, 0, reply_post_free_array_sz
);
7555 for (i
= 0; i
< ioc
->reply_queue_count
; i
++)
7556 ioc
->reply_post_free_array
[i
].RDPQBaseAddress
=
7558 (u64
)ioc
->reply_post
[i
].reply_post_free_dma
);
7559 mpi_request
.MsgFlags
= MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
;
7560 mpi_request
.ReplyDescriptorPostQueueAddress
=
7561 cpu_to_le64((u64
)ioc
->reply_post_free_array_dma
);
7563 mpi_request
.ReplyDescriptorPostQueueAddress
=
7564 cpu_to_le64((u64
)ioc
->reply_post
[0].reply_post_free_dma
);
7568 * Set the flag to enable CoreDump state feature in IOC firmware.
7570 mpi_request
.ConfigurationFlags
|=
7571 cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE
);
7573 /* This time stamp specifies number of milliseconds
7574 * since epoch ~ midnight January 1, 1970.
7576 current_time
= ktime_get_real();
7577 mpi_request
.TimeStamp
= cpu_to_le64(ktime_to_ms(current_time
));
7579 if (ioc
->logging_level
& MPT_DEBUG_INIT
) {
7583 mfp
= (__le32
*)&mpi_request
;
7584 ioc_info(ioc
, "\toffset:data\n");
7585 for (i
= 0; i
< sizeof(Mpi2IOCInitRequest_t
)/4; i
++)
7586 ioc_info(ioc
, "\t[0x%02x]:%08x\n", i
*4,
7587 le32_to_cpu(mfp
[i
]));
7590 r
= _base_handshake_req_reply_wait(ioc
,
7591 sizeof(Mpi2IOCInitRequest_t
), (u32
*)&mpi_request
,
7592 sizeof(Mpi2IOCInitReply_t
), (u16
*)&mpi_reply
, 30);
7595 ioc_err(ioc
, "%s: handshake failed (r=%d)\n", __func__
, r
);
7599 ioc_status
= le16_to_cpu(mpi_reply
.IOCStatus
) & MPI2_IOCSTATUS_MASK
;
7600 if (ioc_status
!= MPI2_IOCSTATUS_SUCCESS
||
7601 mpi_reply
.IOCLogInfo
) {
7602 ioc_err(ioc
, "%s: failed\n", __func__
);
7606 /* Reset TimeSync Counter*/
7607 ioc
->timestamp_update_count
= 0;
7612 * mpt3sas_port_enable_done - command completion routine for port enable
7613 * @ioc: per adapter object
7614 * @smid: system request message index
7615 * @msix_index: MSIX table index supplied by the OS
7616 * @reply: reply message frame(lower 32bit addr)
7618 * Return: 1 meaning mf should be freed from _base_interrupt
7619 * 0 means the mf is freed from this function.
7622 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER
*ioc
, u16 smid
, u8 msix_index
,
7625 MPI2DefaultReply_t
*mpi_reply
;
7628 if (ioc
->port_enable_cmds
.status
== MPT3_CMD_NOT_USED
)
7631 mpi_reply
= mpt3sas_base_get_reply_virt_addr(ioc
, reply
);
7635 if (mpi_reply
->Function
!= MPI2_FUNCTION_PORT_ENABLE
)
7638 ioc
->port_enable_cmds
.status
&= ~MPT3_CMD_PENDING
;
7639 ioc
->port_enable_cmds
.status
|= MPT3_CMD_COMPLETE
;
7640 ioc
->port_enable_cmds
.status
|= MPT3_CMD_REPLY_VALID
;
7641 memcpy(ioc
->port_enable_cmds
.reply
, mpi_reply
, mpi_reply
->MsgLength
*4);
7642 ioc_status
= le16_to_cpu(mpi_reply
->IOCStatus
) & MPI2_IOCSTATUS_MASK
;
7643 if (ioc_status
!= MPI2_IOCSTATUS_SUCCESS
)
7644 ioc
->port_enable_failed
= 1;
7646 if (ioc
->port_enable_cmds
.status
& MPT3_CMD_COMPLETE_ASYNC
) {
7647 ioc
->port_enable_cmds
.status
&= ~MPT3_CMD_COMPLETE_ASYNC
;
7648 if (ioc_status
== MPI2_IOCSTATUS_SUCCESS
) {
7649 mpt3sas_port_enable_complete(ioc
);
7652 ioc
->start_scan_failed
= ioc_status
;
7653 ioc
->start_scan
= 0;
7657 complete(&ioc
->port_enable_cmds
.done
);
7662 * _base_send_port_enable - send port_enable(discovery stuff) to firmware
7663 * @ioc: per adapter object
7665 * Return: 0 for success, non-zero for failure.
7668 _base_send_port_enable(struct MPT3SAS_ADAPTER
*ioc
)
7670 Mpi2PortEnableRequest_t
*mpi_request
;
7671 Mpi2PortEnableReply_t
*mpi_reply
;
7676 ioc_info(ioc
, "sending port enable !!\n");
7678 if (ioc
->port_enable_cmds
.status
& MPT3_CMD_PENDING
) {
7679 ioc_err(ioc
, "%s: internal command already in use\n", __func__
);
7683 smid
= mpt3sas_base_get_smid(ioc
, ioc
->port_enable_cb_idx
);
7685 ioc_err(ioc
, "%s: failed obtaining a smid\n", __func__
);
7689 ioc
->port_enable_cmds
.status
= MPT3_CMD_PENDING
;
7690 mpi_request
= mpt3sas_base_get_msg_frame(ioc
, smid
);
7691 ioc
->port_enable_cmds
.smid
= smid
;
7692 memset(mpi_request
, 0, sizeof(Mpi2PortEnableRequest_t
));
7693 mpi_request
->Function
= MPI2_FUNCTION_PORT_ENABLE
;
7695 init_completion(&ioc
->port_enable_cmds
.done
);
7696 ioc
->put_smid_default(ioc
, smid
);
7697 wait_for_completion_timeout(&ioc
->port_enable_cmds
.done
, 300*HZ
);
7698 if (!(ioc
->port_enable_cmds
.status
& MPT3_CMD_COMPLETE
)) {
7699 ioc_err(ioc
, "%s: timeout\n", __func__
);
7700 _debug_dump_mf(mpi_request
,
7701 sizeof(Mpi2PortEnableRequest_t
)/4);
7702 if (ioc
->port_enable_cmds
.status
& MPT3_CMD_RESET
)
7709 mpi_reply
= ioc
->port_enable_cmds
.reply
;
7710 ioc_status
= le16_to_cpu(mpi_reply
->IOCStatus
) & MPI2_IOCSTATUS_MASK
;
7711 if (ioc_status
!= MPI2_IOCSTATUS_SUCCESS
) {
7712 ioc_err(ioc
, "%s: failed with (ioc_status=0x%08x)\n",
7713 __func__
, ioc_status
);
7719 ioc
->port_enable_cmds
.status
= MPT3_CMD_NOT_USED
;
7720 ioc_info(ioc
, "port enable: %s\n", r
== 0 ? "SUCCESS" : "FAILED");
7725 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
7726 * @ioc: per adapter object
7728 * Return: 0 for success, non-zero for failure.
7731 mpt3sas_port_enable(struct MPT3SAS_ADAPTER
*ioc
)
7733 Mpi2PortEnableRequest_t
*mpi_request
;
7736 ioc_info(ioc
, "sending port enable !!\n");
7738 if (ioc
->port_enable_cmds
.status
& MPT3_CMD_PENDING
) {
7739 ioc_err(ioc
, "%s: internal command already in use\n", __func__
);
7743 smid
= mpt3sas_base_get_smid(ioc
, ioc
->port_enable_cb_idx
);
7745 ioc_err(ioc
, "%s: failed obtaining a smid\n", __func__
);
7748 ioc
->drv_internal_flags
|= MPT_DRV_INTERNAL_FIRST_PE_ISSUED
;
7749 ioc
->port_enable_cmds
.status
= MPT3_CMD_PENDING
;
7750 ioc
->port_enable_cmds
.status
|= MPT3_CMD_COMPLETE_ASYNC
;
7751 mpi_request
= mpt3sas_base_get_msg_frame(ioc
, smid
);
7752 ioc
->port_enable_cmds
.smid
= smid
;
7753 memset(mpi_request
, 0, sizeof(Mpi2PortEnableRequest_t
));
7754 mpi_request
->Function
= MPI2_FUNCTION_PORT_ENABLE
;
7756 ioc
->put_smid_default(ioc
, smid
);
/**
 * _base_determine_wait_on_discovery - disposition
 * @ioc: per adapter object
 *
 * Decide whether to wait on discovery to complete. Used to either
 * locate boot device, or report volumes ahead of physical devices.
 *
 * Return: 1 for wait, 0 for don't wait.
 */
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
{
	/* We wait for discovery to complete if IR firmware is loaded.
	 * The sas topology events arrive before PD events, so we need time to
	 * turn on the bit in ioc->pd_handles to indicate PD
	 * Also, it maybe required to report Volumes ahead of physical
	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
	 */
	if (ioc->ir_firmware)
		return 1;

	/* if no Bios, then we don't need to wait */
	if (!ioc->bios_pg3.BiosVersion)
		return 0;

	/* Bios is present, then we drop down here.
	 *
	 * If there any entries in the Bios Page 2, then we wait
	 * for discovery to complete.
	 */

	/* Current Boot Device */
	if ((ioc->bios_pg2.CurrentBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	/* Request Boot Device */
	    (ioc->bios_pg2.ReqBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	/* Alternate Request Boot Device */
	    (ioc->bios_pg2.ReqAltBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
		return 0;

	return 1;
}
/**
 * _base_unmask_events - turn on notification for this event
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The mask is stored in ioc->event_masks.
 */
static void
_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u32 desired_event;

	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));

	if (event < 32)
		ioc->event_masks[0] &= ~desired_event;
	else if (event < 64)
		ioc->event_masks[1] &= ~desired_event;
	else if (event < 96)
		ioc->event_masks[2] &= ~desired_event;
	else if (event < 128)
		ioc->event_masks[3] &= ~desired_event;
}
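/*
 * Worked example (illustrative, not from the original source): for a
 * firmware event code of 37, desired_event = 1 << (37 % 32) = 1 << 5,
 * and since 32 <= 37 < 64 the bit is cleared in ioc->event_masks[1];
 * a cleared mask bit means the IOC will deliver that event.
 */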
7836 * _base_event_notification - send event notification
7837 * @ioc: per adapter object
7839 * Return: 0 for success, non-zero for failure.
7842 _base_event_notification(struct MPT3SAS_ADAPTER
*ioc
)
7844 Mpi2EventNotificationRequest_t
*mpi_request
;
7847 int i
, issue_diag_reset
= 0;
7849 dinitprintk(ioc
, ioc_info(ioc
, "%s\n", __func__
));
7851 if (ioc
->base_cmds
.status
& MPT3_CMD_PENDING
) {
7852 ioc_err(ioc
, "%s: internal command already in use\n", __func__
);
7856 smid
= mpt3sas_base_get_smid(ioc
, ioc
->base_cb_idx
);
7858 ioc_err(ioc
, "%s: failed obtaining a smid\n", __func__
);
7861 ioc
->base_cmds
.status
= MPT3_CMD_PENDING
;
7862 mpi_request
= mpt3sas_base_get_msg_frame(ioc
, smid
);
7863 ioc
->base_cmds
.smid
= smid
;
7864 memset(mpi_request
, 0, sizeof(Mpi2EventNotificationRequest_t
));
7865 mpi_request
->Function
= MPI2_FUNCTION_EVENT_NOTIFICATION
;
7866 mpi_request
->VF_ID
= 0; /* TODO */
7867 mpi_request
->VP_ID
= 0;
7868 for (i
= 0; i
< MPI2_EVENT_NOTIFY_EVENTMASK_WORDS
; i
++)
7869 mpi_request
->EventMasks
[i
] =
7870 cpu_to_le32(ioc
->event_masks
[i
]);
7871 init_completion(&ioc
->base_cmds
.done
);
7872 ioc
->put_smid_default(ioc
, smid
);
7873 wait_for_completion_timeout(&ioc
->base_cmds
.done
, 30*HZ
);
7874 if (!(ioc
->base_cmds
.status
& MPT3_CMD_COMPLETE
)) {
7875 ioc_err(ioc
, "%s: timeout\n", __func__
);
7876 _debug_dump_mf(mpi_request
,
7877 sizeof(Mpi2EventNotificationRequest_t
)/4);
7878 if (ioc
->base_cmds
.status
& MPT3_CMD_RESET
)
7881 issue_diag_reset
= 1;
7884 dinitprintk(ioc
, ioc_info(ioc
, "%s: complete\n", __func__
));
7885 ioc
->base_cmds
.status
= MPT3_CMD_NOT_USED
;
7887 if (issue_diag_reset
) {
7888 if (ioc
->drv_internal_flags
& MPT_DRV_INTERNAL_FIRST_PE_ISSUED
)
7890 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc
))
7898 * mpt3sas_base_validate_event_type - validating event types
7899 * @ioc: per adapter object
7900 * @event_type: firmware event
7902 * This will turn on firmware event notification when application
7903 * ask for that event. We don't mask events that are already enabled.
7906 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER
*ioc
, u32
*event_type
)
7909 u32 event_mask
, desired_event
;
7910 u8 send_update_to_fw
;
7912 for (i
= 0, send_update_to_fw
= 0; i
<
7913 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS
; i
++) {
7914 event_mask
= ~event_type
[i
];
7916 for (j
= 0; j
< 32; j
++) {
7917 if (!(event_mask
& desired_event
) &&
7918 (ioc
->event_masks
[i
] & desired_event
)) {
7919 ioc
->event_masks
[i
] &= ~desired_event
;
7920 send_update_to_fw
= 1;
7922 desired_event
= (desired_event
<< 1);
7926 if (!send_update_to_fw
)
7929 mutex_lock(&ioc
->base_cmds
.mutex
);
7930 _base_event_notification(ioc
);
7931 mutex_unlock(&ioc
->base_cmds
.mutex
);
/**
 * mpt3sas_base_unlock_and_get_host_diagnostic - enable Host Diagnostic Register writes
 * @ioc: per adapter object
 * @host_diagnostic: host diagnostic register content
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_unlock_and_get_host_diagnostic(struct MPT3SAS_ADAPTER *ioc,
	u32 *host_diagnostic)
{
	u32 count;

	*host_diagnostic = 0;
	count = 0;

	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		msleep(100);

		if (count++ > 20) {
			ioc_info(ioc,
			    "Stop writing magic sequence after 20 retries\n");
			_base_dump_reg_set(ioc);
			return -EFAULT;
		}

		*host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
		drsprintk(ioc,
		    ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
		    count, *host_diagnostic));

	} while ((*host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
	return 0;
}
/**
 * mpt3sas_base_lock_host_diagnostic - Disable Host Diagnostic Register writes
 * @ioc: per adapter object
 */
void
mpt3sas_base_lock_host_diagnostic(struct MPT3SAS_ADAPTER *ioc)
{
	drsprintk(ioc, ioc_info(ioc, "disable writes to the diagnostic register\n"));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
}
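/*
 * Usage note (sketch, based on _base_diag_reset() below): the unlock and
 * lock helpers bracket any write to the HostDiagnostic register, e.g.
 *
 *	mutex_lock(&ioc->hostdiag_unlock_mutex);
 *	if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic))
 *		goto out;
 *	... write HostDiagnostic ...
 *	mpt3sas_base_lock_host_diagnostic(ioc);
 *	mutex_unlock(&ioc->hostdiag_unlock_mutex);
 */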
7996 * _base_diag_reset - the "big hammer" start of day reset
7997 * @ioc: per adapter object
7999 * Return: 0 for success, non-zero for failure.
8002 _base_diag_reset(struct MPT3SAS_ADAPTER
*ioc
)
8004 u32 host_diagnostic
;
8009 ioc_info(ioc
, "sending diag reset !!\n");
8011 pci_cfg_access_lock(ioc
->pdev
);
8013 drsprintk(ioc
, ioc_info(ioc
, "clear interrupts\n"));
8015 mutex_lock(&ioc
->hostdiag_unlock_mutex
);
8016 if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc
, &host_diagnostic
))
8019 hcb_size
= ioc
->base_readl(&ioc
->chip
->HCBSize
);
8020 drsprintk(ioc
, ioc_info(ioc
, "diag reset: issued\n"));
8021 writel(host_diagnostic
| MPI2_DIAG_RESET_ADAPTER
,
8022 &ioc
->chip
->HostDiagnostic
);
8024 /* This delay allows the chip PCIe hardware time to finish reset tasks */
8025 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC
/1000);
8027 /* Approximately 300 second max wait */
8028 for (count
= 0; count
< (300000000 /
8029 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
); count
++) {
8031 host_diagnostic
= ioc
->base_readl_ext_retry(&ioc
->chip
->HostDiagnostic
);
8033 if (host_diagnostic
== 0xFFFFFFFF) {
8035 "Invalid host diagnostic register value\n");
8036 _base_dump_reg_set(ioc
);
8039 if (!(host_diagnostic
& MPI2_DIAG_RESET_ADAPTER
))
8042 /* Wait to pass the second read delay window */
8043 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC
/1000);
8046 if (host_diagnostic
& MPI2_DIAG_HCB_MODE
) {
8049 ioc_info(ioc
, "restart the adapter assuming the\n"
8050 "HCB Address points to good F/W\n"));
8051 host_diagnostic
&= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK
;
8052 host_diagnostic
|= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW
;
8053 writel(host_diagnostic
, &ioc
->chip
->HostDiagnostic
);
8055 drsprintk(ioc
, ioc_info(ioc
, "re-enable the HCDW\n"));
8056 writel(hcb_size
| MPI2_HCB_SIZE_HCB_ENABLE
,
8057 &ioc
->chip
->HCBSize
);
8060 drsprintk(ioc
, ioc_info(ioc
, "restart the adapter\n"));
8061 writel(host_diagnostic
& ~MPI2_DIAG_HOLD_IOC_RESET
,
8062 &ioc
->chip
->HostDiagnostic
);
8064 mpt3sas_base_lock_host_diagnostic(ioc
);
8065 mutex_unlock(&ioc
->hostdiag_unlock_mutex
);
8067 drsprintk(ioc
, ioc_info(ioc
, "Wait for FW to go to the READY state\n"));
8068 ioc_state
= _base_wait_on_iocstate(ioc
, MPI2_IOC_STATE_READY
, 20);
8070 ioc_err(ioc
, "%s: failed going to ready state (ioc_state=0x%x)\n",
8071 __func__
, ioc_state
);
8072 _base_dump_reg_set(ioc
);
8076 pci_cfg_access_unlock(ioc
->pdev
);
8077 ioc_info(ioc
, "diag reset: SUCCESS\n");
8081 pci_cfg_access_unlock(ioc
->pdev
);
8082 ioc_err(ioc
, "diag reset: FAILED\n");
8083 mutex_unlock(&ioc
->hostdiag_unlock_mutex
);
8088 * mpt3sas_base_make_ioc_ready - put controller in READY state
8089 * @ioc: per adapter object
8090 * @type: FORCE_BIG_HAMMER or SOFT_RESET
8092 * Return: 0 for success, non-zero for failure.
8095 mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER
*ioc
, enum reset_type type
)
8101 dinitprintk(ioc
, ioc_info(ioc
, "%s\n", __func__
));
8103 if (ioc
->pci_error_recovery
)
8106 ioc_state
= mpt3sas_base_get_iocstate(ioc
, 0);
8108 ioc_info(ioc
, "%s: ioc_state(0x%08x)\n",
8109 __func__
, ioc_state
));
8111 /* if in RESET state, it should move to READY state shortly */
8113 if ((ioc_state
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_RESET
) {
8114 while ((ioc_state
& MPI2_IOC_STATE_MASK
) !=
8115 MPI2_IOC_STATE_READY
) {
8116 if (count
++ == 10) {
8117 ioc_err(ioc
, "%s: failed going to ready state (ioc_state=0x%x)\n",
8118 __func__
, ioc_state
);
8122 ioc_state
= mpt3sas_base_get_iocstate(ioc
, 0);
8126 if ((ioc_state
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_READY
)
8129 if (ioc_state
& MPI2_DOORBELL_USED
) {
8130 ioc_info(ioc
, "unexpected doorbell active!\n");
8131 goto issue_diag_reset
;
8134 if ((ioc_state
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_FAULT
) {
8135 mpt3sas_print_fault_code(ioc
, ioc_state
&
8136 MPI2_DOORBELL_DATA_MASK
);
8137 goto issue_diag_reset
;
8140 if ((ioc_state
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_COREDUMP
) {
8142 * if host reset is invoked while watch dog thread is waiting
8143 * for IOC state to be changed to Fault state then driver has
8144 * to wait here for CoreDump state to clear otherwise reset
8145 * will be issued to the FW and FW move the IOC state to
8146 * reset state without copying the FW logs to coredump region.
8148 if (ioc
->ioc_coredump_loop
!= MPT3SAS_COREDUMP_LOOP_DONE
) {
8149 mpt3sas_print_coredump_info(ioc
, ioc_state
&
8150 MPI2_DOORBELL_DATA_MASK
);
8151 mpt3sas_base_wait_for_coredump_completion(ioc
,
8154 goto issue_diag_reset
;
8157 if (type
== FORCE_BIG_HAMMER
)
8158 goto issue_diag_reset
;
8160 if ((ioc_state
& MPI2_IOC_STATE_MASK
) == MPI2_IOC_STATE_OPERATIONAL
)
8161 if (!(_base_send_ioc_reset(ioc
,
8162 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
, 15))) {
8167 rc
= _base_diag_reset(ioc
);
/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index, rc;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue */
	for (i = 0, reply_address = (u32)ioc->reply_dma;
	    i < ioc->reply_free_queue_depth; i++, reply_address +=
	    ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
		if (ioc->is_mcpu_endpoint)
			_base_clone_reply_to_sys_mem(ioc,
					reply_address, i);
	}

	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
				ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r) {
		/*
		 * No need to check the IOC state for fault and issue a
		 * diag reset during host reset; this check is needed
		 * only at driver load time.
		 */
		if (!ioc->is_driver_loading)
			return r;

		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_send_ioc_init(ioc)))
			return r;
	}

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7) <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
			    MPI2_RPHI_MSIX_INDEX_SHIFT,
			    &ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	mpt3sas_base_unmask_interrupts(ioc);

	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		r = _base_display_fwpkg_version(ioc);
		if (r)
			return r;
	}

	r = _base_static_config_pages(ioc);
	if (r)
		return r;

	r = _base_event_notification(ioc);
	if (r)
		return r;

	if (!ioc->shost_recovery) {

		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	r = _base_send_port_enable(ioc);

	return r;
}
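/*
 * Bring-up ordering in _base_make_ioc_operational() above: the delayed work
 * lists are flushed and the SMID tracker pools and reply free/post queues are
 * re-primed before _base_send_ioc_init() is sent, and the
 * ReplyFreeHostIndex/ReplyPostHostIndex registers are programmed before
 * interrupts are unmasked. On the initial load path (!shost_recovery) the
 * function returns before port enable, which is deferred to the async SCSI
 * scan path (per the scan_start/scan_finished comment); during host reset
 * recovery, port enable is issued synchronously here.
 */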
/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* synchronize freeing of resources with the pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		mpt3sas_base_mask_interrupts(ioc);
		ioc->shost_recovery = 1;
		mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
}
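/*
 * mpt3sas_base_free_resources() quiesces the controller with a SOFT_RESET
 * while shost_recovery is set before unmapping the BARs; holding
 * pci_access_mutex for the whole sequence keeps it serialized against any
 * other path that touches the PCI resources under the same mutex.
 */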
/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, rc;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* setup cpu_msix_table */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
		r = -ENOMEM;
		goto out_free_resources;
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->reply_post_host_index) {
			ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
			r = -ENOMEM;
			goto out_free_resources;
		}
	}

	ioc->smp_affinity_enable = smp_affinity_enable;

	ioc->rdpq_array_enable_assigned = 0;
	ioc->use_32bit_dma = false;

	if (ioc->is_aero_ioc) {
		ioc->base_readl = &_base_readl_aero;
		ioc->base_readl_ext_retry = &_base_readl_ext_retry;
	} else {
		ioc->base_readl = &_base_readl;
		ioc->base_readl_ext_retry = &_base_readl;
	}

	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;

	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = _base_get_ioc_facts(ioc);
	if (r) {
		rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_get_ioc_facts(ioc)))
			goto out_free_resources;
	}

	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		ioc->build_sg_scmd = &_base_build_sg_scmd;
		ioc->build_sg = &_base_build_sg;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/*
		 * In SAS3.0,
		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
		 * Target Status - all require the IEEE formatted scatter
		 * gather elements.
		 */
		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
		ioc->build_sg = &_base_build_sg_ieee;
		ioc->build_nvme_prp = &_base_build_nvme_prp;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
		if (ioc->high_iops_queues)
			ioc->get_msix_index_for_smlio =
			    &_base_get_high_iops_msix_index;
		else
			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	}
	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default = &_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path =
		    &_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority =
		    &_base_put_smid_hi_priority_atomic;
	} else {
		ioc->put_smid_default = &_base_put_smid_default;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
		if (ioc->is_mcpu_endpoint)
			ioc->put_smid_scsi_io =
			    &_base_put_smid_mpi_ep_scsi_io;
		else
			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
	}

	/*
	 * These function pointers are for the other requests that don't
	 * require IEEE scatter gather elements.
	 *
	 * For example, Configuration Pages and SAS IOUNIT Control don't.
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;

	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i);
		if (r) {
			rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
			if (rc || (_base_get_port_facts(ioc, i)))
				goto out_free_resources;
		}
	}

	r = _base_allocate_memory_pools(ioc);
	if (r)
		goto out_free_resources;

	if (irqpoll_weight > 0)
		ioc->thresh_hold = irqpoll_weight;
	else
		ioc->thresh_hold = ioc->hba_queue_depth/4;

	_base_init_irqpolls(ioc);
	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory pd handle bitmask list */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	/*
	 * pd_handles_sz should have, at least, the minimal room for
	 * set_bit()/test_bit(), otherwise an out-of-memory touch may occur.
	 */
	ioc->pd_handles_sz = ALIGN(ioc->pd_handles_sz, sizeof(unsigned long));

	ioc->pd_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* allocate memory for pending OS device add list */
	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pend_os_device_add_sz++;
	/*
	 * pend_os_device_add_sz should have, at least, the minimal room for
	 * set_bit()/test_bit(), otherwise an out-of-memory touch may occur.
	 */
	ioc->pend_os_device_add_sz = ALIGN(ioc->pend_os_device_add_sz,
	    sizeof(unsigned long));
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
	    kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
		if (ioc->is_gen35_ioc) {
			_base_unmask_events(ioc,
			    MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
			_base_unmask_events(ioc,
			    MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}

	r = _base_make_ioc_operational(ioc);
	if (r == -EAGAIN)
		r = _base_make_ioc_operational(ioc);
	if (r)
		goto out_free_resources;

	/*
	 * Copy the current IOCFacts into prev_fw_facts; it will be used
	 * during online firmware upgrade.
	 */
	memcpy(&ioc->prev_fw_facts, &ioc->facts,
	    sizeof(struct mpt3sas_facts));

	ioc->non_operational_loop = 0;
	ioc->ioc_coredump_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	return r;
}
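/*
 * The pd_handles/pend_os_device_add sizing in mpt3sas_base_attach() above
 * works in whole bytes and then rounds up to an unsigned long boundary. As an
 * illustration (values assumed, not taken from real hardware): MaxDevHandle
 * == 1026 gives 1026 / 8 = 128 bytes plus one for the remainder, i.e. 129,
 * which ALIGN() rounds to 136 on a 64-bit host so that set_bit()/test_bit()
 * never touch memory beyond the allocation. On any allocation failure,
 * out_free_resources sets remove_host, frees everything allocated so far and
 * NULLs the internal command reply pointers so a later detach cannot
 * double-free them.
 */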
/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	mpt3sas_free_enclosure_list(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}
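/*
 * mpt3sas_base_detach() mirrors the attach error path: it additionally stops
 * the fault watchdog and releases the enclosure list before freeing the
 * per-adapter allocations, and it relies on mpt3sas_base_free_resources()
 * above to put the IOC back into the READY state before the BARs go away.
 */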
/**
 * _base_pre_reset_handler - pre reset handler
 * @ioc: per adapter object
 */
static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_pre_reset_handler(ioc);
	mpt3sas_ctl_pre_reset_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
/**
 * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands
 * @ioc: per adapter object
 */
static void
_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
{
	dtmprintk(ioc,
	    ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));

	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
		ioc->transport_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
		complete(&ioc->transport_cmds.done);
	}
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc->base_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
		complete(&ioc->base_cmds.done);
	}
	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc->port_enable_failed = 1;
		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
		if (ioc->is_driver_loading) {
			ioc->start_scan_failed =
			    MPI2_IOCSTATUS_INTERNAL_ERROR;
			ioc->start_scan = 0;
		}
		complete(&ioc->port_enable_cmds.done);
	}
	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
		ioc->config_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
		ioc->config_cmds.smid = USHRT_MAX;
		complete(&ioc->config_cmds.done);
	}
}
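/*
 * Each branch in _base_clear_outstanding_mpt_commands() follows the same
 * pattern: mark the internal command with MPT3_CMD_RESET, return its SMID to
 * the free pool, and complete() the waiter so code blocked in
 * wait_for_completion() sees the reset flag instead of a firmware reply. The
 * config path also invalidates the cached SMID (USHRT_MAX) so it is no longer
 * treated as valid.
 */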
/**
 * _base_clear_outstanding_commands - clear all outstanding commands
 * @ioc: per adapter object
 */
static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
	mpt3sas_ctl_clear_outstanding_ioctls(ioc);
	_base_clear_outstanding_mpt_commands(ioc);
}
/**
 * _base_reset_done_handler - reset done handler
 * @ioc: per adapter object
 */
static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_reset_done_handler(ioc);
	mpt3sas_ctl_reset_done_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}
/**
 * mpt3sas_wait_for_commands_to_complete - wait for pending commands
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * This function waits up to 10s for all pending commands to complete
 * prior to putting the controller in reset.
 */
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = scsi_host_busy(ioc->shost);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}
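/*
 * mpt3sas_wait_for_commands_to_complete() samples the outstanding command
 * count once via scsi_host_busy(); the I/O completion path is expected to
 * decrement ioc->pending_io_count and wake ioc->reset_wq, so the
 * wait_event_timeout() above gives outstanding I/O at most 10 seconds
 * (10 * HZ jiffies) to drain before the reset proceeds anyway.
 */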
/**
 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
 * attributes during online firmware upgrade and update the corresponding
 * IOC variables accordingly.
 *
 * @ioc: Pointer to MPT_ADAPTER structure
 */
static int
_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
{
	u16 pd_handles_sz;
	void *pd_handles = NULL, *blocking_handles = NULL;
	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;

	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
		if (ioc->facts.MaxDevHandle % 8)
			pd_handles_sz++;

		/*
		 * pd_handles should have, at least, the minimal room for
		 * set_bit()/test_bit(), otherwise an out-of-memory touch may
		 * occur.
		 */
		pd_handles_sz = ALIGN(pd_handles_sz, sizeof(unsigned long));
		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
		    GFP_KERNEL);
		if (!pd_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pd_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pd_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->pd_handles = pd_handles;

		blocking_handles = krealloc(ioc->blocking_handles,
		    pd_handles_sz, GFP_KERNEL);
		if (!blocking_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for blocking_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(blocking_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->blocking_handles = blocking_handles;
		ioc->pd_handles_sz = pd_handles_sz;

		pend_os_device_add = krealloc(ioc->pend_os_device_add,
		    pd_handles_sz, GFP_KERNEL);
		if (!pend_os_device_add) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
		    (pd_handles_sz - ioc->pend_os_device_add_sz));
		ioc->pend_os_device_add = pend_os_device_add;
		ioc->pend_os_device_add_sz = pd_handles_sz;

		device_remove_in_progress = krealloc(
		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
		if (!device_remove_in_progress) {
			ioc_info(ioc,
			    "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(device_remove_in_progress +
		    ioc->device_remove_in_progress_sz, 0,
		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
		ioc->device_remove_in_progress = device_remove_in_progress;
		ioc->device_remove_in_progress_sz = pd_handles_sz;
	}

	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
	return 0;
}
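/*
 * _base_check_ioc_facts_changes() only ever grows the bitmaps: when a
 * firmware upgrade raises MaxDevHandle, each bitmap is krealloc()'d to the
 * new (ALIGN'ed) size and only the newly added tail is zeroed, so handles
 * already tracked before the reset keep their state. If MaxDevHandle shrinks,
 * the existing allocations are simply kept.
 */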
/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));

	if (ioc->pci_error_recovery) {
		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	mutex_lock(&ioc->reset_in_progress_mutex);

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
		    (ioc_state & MPI2_IOC_STATE_MASK) ==
		    MPI2_IOC_STATE_COREDUMP) {
			is_fault = 1;
			ioc->htb_rel.trigger_info_dwords[1] =
			    (ioc_state & MPI2_DOORBELL_DATA_MASK);
		}
	}

	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_pause_mq_polling(ioc);
	r = mpt3sas_base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_clear_outstanding_commands(ioc);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}

	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	r = _base_check_ioc_facts_changes(ioc);
	if (r) {
		ioc_info(ioc,
		    "Some of the parameters got changed in this new firmware image and it requires a system reboot\n");
		goto out;
	}

	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware. Please reboot the system and ensure that the correct firmware version is running\n",
		    ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_done_handler(ioc);

 out:
	ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);
	mpt3sas_base_resume_mq_polling(ioc);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
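	/*
	 * At this point r reflects the overall outcome: the trigger events
	 * (MASTER_TRIGGER_FW_FAULT or MASTER_TRIGGER_ADAPTER_RESET) were
	 * raised above only if the reset succeeded and a trace diag buffer
	 * was registered but not yet released when the reset started. A
	 * typical external caller is the SCSI host reset handler; a minimal
	 * sketch (not a verbatim copy of mpt3sas_scsih.c):
	 *
	 *	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	 *	return (retval == 0) ? SUCCESS : FAILED;
	 */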