drivers/scsi/megaraid/megaraid_sas_base.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Linux MegaRAID driver for SAS based RAID controllers
5 * Copyright (c) 2003-2013 LSI Corporation
6 * Copyright (c) 2013-2016 Avago Technologies
7 * Copyright (c) 2016-2018 Broadcom Inc.
9 * Authors: Broadcom Inc.
10 * Sreenivas Bagalkote
11 * Sumant Patro
12 * Bo Yang
13 * Adam Radford
14 * Kashyap Desai <kashyap.desai@broadcom.com>
15 * Sumit Saxena <sumit.saxena@broadcom.com>
17 * Send feedback to: megaraidlinux.pdl@broadcom.com
20 #include <linux/kernel.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/list.h>
24 #include <linux/moduleparam.h>
25 #include <linux/module.h>
26 #include <linux/spinlock.h>
27 #include <linux/interrupt.h>
28 #include <linux/delay.h>
29 #include <linux/uio.h>
30 #include <linux/slab.h>
31 #include <linux/uaccess.h>
32 #include <asm/unaligned.h>
33 #include <linux/fs.h>
34 #include <linux/compat.h>
35 #include <linux/blkdev.h>
36 #include <linux/mutex.h>
37 #include <linux/poll.h>
38 #include <linux/vmalloc.h>
39 #include <linux/irq_poll.h>
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_device.h>
44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_tcq.h>
46 #include <scsi/scsi_dbg.h>
47 #include "megaraid_sas_fusion.h"
48 #include "megaraid_sas.h"
51 * Number of sectors per IO command
52 * Will be set in megasas_init_mfi if user does not provide
54 static unsigned int max_sectors;
55 module_param_named(max_sectors, max_sectors, int, 0444);
56 MODULE_PARM_DESC(max_sectors,
57 "Maximum number of sectors per IO command");
59 static int msix_disable;
60 module_param(msix_disable, int, 0444);
61 MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
63 static unsigned int msix_vectors;
64 module_param(msix_vectors, int, 0444);
65 MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
67 static int allow_vf_ioctls;
68 module_param(allow_vf_ioctls, int, 0444);
69 MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
71 static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
72 module_param(throttlequeuedepth, int, 0444);
73 MODULE_PARM_DESC(throttlequeuedepth,
74 "Adapter queue depth when throttled due to I/O timeout. Default: 16");
76 unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
77 module_param(resetwaittime, int, 0444);
78 MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
80 int smp_affinity_enable = 1;
81 module_param(smp_affinity_enable, int, 0444);
82 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
84 int rdpq_enable = 1;
85 module_param(rdpq_enable, int, 0444);
86 MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
88 unsigned int dual_qdepth_disable;
89 module_param(dual_qdepth_disable, int, 0444);
90 MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
92 unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
93 module_param(scmd_timeout, int, 0444);
94 MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
96 int perf_mode = -1;
97 module_param(perf_mode, int, 0444);
98 MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
99 "0 - balanced: High iops and low latency queues are allocated &\n\t\t"
100 "interrupt coalescing is enabled only on high iops queues\n\t\t"
101 "1 - iops: High iops queues are not allocated &\n\t\t"
102 "interrupt coalescing is enabled on all queues\n\t\t"
103 "2 - latency: High iops queues are not allocated &\n\t\t"
104 "interrupt coalescing is disabled on all queues\n\t\t"
105 "default mode is 'balanced'"
108 int event_log_level = MFI_EVT_CLASS_CRITICAL;
109 module_param(event_log_level, int, 0644);
110 MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
112 unsigned int enable_sdev_max_qd;
113 module_param(enable_sdev_max_qd, int, 0444);
114 MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
116 MODULE_LICENSE("GPL");
117 MODULE_VERSION(MEGASAS_VERSION);
118 MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
119 MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
121 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
122 static int megasas_get_pd_list(struct megasas_instance *instance);
123 static int megasas_ld_list_query(struct megasas_instance *instance,
124 u8 query_type);
125 static int megasas_issue_init_mfi(struct megasas_instance *instance);
126 static int megasas_register_aen(struct megasas_instance *instance,
127 u32 seq_num, u32 class_locale_word);
128 static void megasas_get_pd_info(struct megasas_instance *instance,
129 struct scsi_device *sdev);
132 * PCI ID table for all supported controllers
134 static struct pci_device_id megasas_pci_table[] = {
136 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
137 /* xscale IOP */
138 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
139 /* ppc IOP */
140 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
141 /* ppc IOP */
142 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
143 /* gen2*/
144 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
145 /* gen2*/
146 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
147 /* skinny*/
148 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
149 /* skinny*/
150 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
151 /* xscale IOP, vega */
152 {PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
153 /* xscale IOP */
154 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
155 /* Fusion */
156 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
157 /* Plasma */
158 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
159 /* Invader */
160 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
161 /* Fury */
162 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
163 /* Intruder */
164 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
165 /* Intruder 24 port*/
166 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
167 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
168 /* VENTURA */
169 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
170 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
171 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
172 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
173 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
174 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
175 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
176 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
177 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
178 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
179 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
180 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
181 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
182 {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
186 MODULE_DEVICE_TABLE(pci, megasas_pci_table);
188 static int megasas_mgmt_majorno;
189 struct megasas_mgmt_info megasas_mgmt_info;
190 static struct fasync_struct *megasas_async_queue;
191 static DEFINE_MUTEX(megasas_async_queue_mutex);
193 static int megasas_poll_wait_aen;
194 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
195 static u32 support_poll_for_event;
196 u32 megasas_dbg_lvl;
197 static u32 support_device_change;
198 static bool support_nvme_encapsulation;
199 static bool support_pci_lane_margining;
201 /* define lock for aen poll */
202 static spinlock_t poll_aen_lock;
204 extern struct dentry *megasas_debugfs_root;
205 extern void megasas_init_debugfs(void);
206 extern void megasas_exit_debugfs(void);
207 extern void megasas_setup_debugfs(struct megasas_instance *instance);
208 extern void megasas_destroy_debugfs(struct megasas_instance *instance);
210 void
211 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
212 u8 alt_status);
213 static u32
214 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
215 static int
216 megasas_adp_reset_gen2(struct megasas_instance *instance,
217 struct megasas_register_set __iomem *reg_set);
218 static irqreturn_t megasas_isr(int irq, void *devp);
219 static u32
220 megasas_init_adapter_mfi(struct megasas_instance *instance);
222 megasas_build_and_issue_cmd(struct megasas_instance *instance,
223 struct scsi_cmnd *scmd);
224 static void megasas_complete_cmd_dpc(unsigned long instance_addr);
226 wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
227 int seconds);
228 void megasas_fusion_ocr_wq(struct work_struct *work);
229 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
230 int initial);
231 static int
232 megasas_set_dma_mask(struct megasas_instance *instance);
233 static int
234 megasas_alloc_ctrl_mem(struct megasas_instance *instance);
235 static inline void
236 megasas_free_ctrl_mem(struct megasas_instance *instance);
237 static inline int
238 megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
239 static inline void
240 megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
241 static inline void
242 megasas_init_ctrl_params(struct megasas_instance *instance);
244 u32 megasas_readl(struct megasas_instance *instance,
245 const volatile void __iomem *addr)
247 u32 i = 0, ret_val;
249 * Due to a HW erratum in Aero controllers, reads to certain
250 * Fusion registers could intermittently return all zeroes.
251 * This behavior is transient in nature and subsequent reads will
252 * return a valid value. As a workaround in the driver, retry readl
253 * up to three times until a non-zero value is read.
255 if (instance->adapter_type == AERO_SERIES) {
256 do {
257 ret_val = readl(addr);
258 i++;
259 } while (ret_val == 0 && i < 3);
260 return ret_val;
261 } else {
262 return readl(addr);
267 * megasas_set_dma_settings - Populate DMA address, length and flags for DCMDs
268 * @instance: Adapter soft state
269 * @dcmd: DCMD frame inside MFI command
270 * @dma_addr: DMA address of buffer to be passed to FW
271 * @dma_len: Length of DMA buffer to be passed to FW
272 * @return: void
274 void megasas_set_dma_settings(struct megasas_instance *instance,
275 struct megasas_dcmd_frame *dcmd,
276 dma_addr_t dma_addr, u32 dma_len)
278 if (instance->consistent_mask_64bit) {
279 dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
280 dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
281 dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
283 } else {
284 dcmd->sgl.sge32[0].phys_addr =
285 cpu_to_le32(lower_32_bits(dma_addr));
286 dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
287 dcmd->flags = cpu_to_le16(dcmd->flags);
291 static void
292 megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
294 instance->instancet->fire_cmd(instance,
295 cmd->frame_phys_addr, 0, instance->reg_set);
296 return;
300 * megasas_get_cmd - Get a command from the free pool
301 * @instance: Adapter soft state
303 * Returns a free command from the pool
305 struct megasas_cmd *megasas_get_cmd(struct megasas_instance
306 *instance)
308 unsigned long flags;
309 struct megasas_cmd *cmd = NULL;
311 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
313 if (!list_empty(&instance->cmd_pool)) {
314 cmd = list_entry((&instance->cmd_pool)->next,
315 struct megasas_cmd, list);
316 list_del_init(&cmd->list);
317 } else {
318 dev_err(&instance->pdev->dev, "Command pool empty!\n");
321 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
322 return cmd;
326 * megasas_return_cmd - Return a cmd to free command pool
327 * @instance: Adapter soft state
328 * @cmd: Command packet to be returned to free command pool
330 void
331 megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
333 unsigned long flags;
334 u32 blk_tags;
335 struct megasas_cmd_fusion *cmd_fusion;
336 struct fusion_context *fusion = instance->ctrl_context;
338 /* This flag is used only for fusion adapter.
339 * Wait for Interrupt for Polled mode DCMD
341 if (cmd->flags & DRV_DCMD_POLLED_MODE)
342 return;
344 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
346 if (fusion) {
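/* Internal MFI commands occupy fusion cmd_list slots after the first max_scsi_cmds entries, which are reserved for SCSI tags */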
347 blk_tags = instance->max_scsi_cmds + cmd->index;
348 cmd_fusion = fusion->cmd_list[blk_tags];
349 megasas_return_cmd_fusion(instance, cmd_fusion);
351 cmd->scmd = NULL;
352 cmd->frame_count = 0;
353 cmd->flags = 0;
354 memset(cmd->frame, 0, instance->mfi_frame_size);
355 cmd->frame->io.context = cpu_to_le32(cmd->index);
356 if (!fusion && reset_devices)
357 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
358 list_add(&cmd->list, (&instance->cmd_pool)->next);
360 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
364 static const char *
365 format_timestamp(uint32_t timestamp)
367 static char buffer[32];
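/* A top byte of 0xff marks a boot-relative timestamp; the low 24 bits hold seconds since boot */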
369 if ((timestamp & 0xff000000) == 0xff000000)
370 snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
371 0x00ffffff);
372 else
373 snprintf(buffer, sizeof(buffer), "%us", timestamp);
374 return buffer;
377 static const char *
378 format_class(int8_t class)
380 static char buffer[6];
382 switch (class) {
383 case MFI_EVT_CLASS_DEBUG:
384 return "debug";
385 case MFI_EVT_CLASS_PROGRESS:
386 return "progress";
387 case MFI_EVT_CLASS_INFO:
388 return "info";
389 case MFI_EVT_CLASS_WARNING:
390 return "WARN";
391 case MFI_EVT_CLASS_CRITICAL:
392 return "CRIT";
393 case MFI_EVT_CLASS_FATAL:
394 return "FATAL";
395 case MFI_EVT_CLASS_DEAD:
396 return "DEAD";
397 default:
398 snprintf(buffer, sizeof(buffer), "%d", class);
399 return buffer;
404 * megasas_decode_evt: Decode FW AEN event and print critical event
405 * for information.
406 * @instance: Adapter soft state
408 static void
409 megasas_decode_evt(struct megasas_instance *instance)
411 struct megasas_evt_detail *evt_detail = instance->evt_detail;
412 union megasas_evt_class_locale class_locale;
413 class_locale.word = le32_to_cpu(evt_detail->cl.word);
415 if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
416 (event_log_level > MFI_EVT_CLASS_DEAD)) {
417 printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
418 event_log_level = MFI_EVT_CLASS_CRITICAL;
421 if (class_locale.members.class >= event_log_level)
422 dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
423 le32_to_cpu(evt_detail->seq_num),
424 format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
425 (class_locale.members.locale),
426 format_class(class_locale.members.class),
427 evt_detail->description);
431 * The following functions are defined for xscale
432 * (deviceid : 1064R, PERC5) controllers
436 * megasas_enable_intr_xscale - Enables interrupts
437 * @regs: MFI register set
439 static inline void
440 megasas_enable_intr_xscale(struct megasas_instance *instance)
442 struct megasas_register_set __iomem *regs;
444 regs = instance->reg_set;
445 writel(0, &(regs)->outbound_intr_mask);
447 /* Dummy readl to force pci flush */
448 readl(&regs->outbound_intr_mask);
452 * megasas_disable_intr_xscale -Disables interrupt
453 * @regs: MFI register set
455 static inline void
456 megasas_disable_intr_xscale(struct megasas_instance *instance)
458 struct megasas_register_set __iomem *regs;
459 u32 mask = 0x1f;
461 regs = instance->reg_set;
462 writel(mask, &regs->outbound_intr_mask);
463 /* Dummy readl to force pci flush */
464 readl(&regs->outbound_intr_mask);
468 * megasas_read_fw_status_reg_xscale - returns the current FW status value
469 * @regs: MFI register set
471 static u32
472 megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
474 return readl(&instance->reg_set->outbound_msg_0);
477 * megasas_clear_interrupt_xscale - Check & clear interrupt
478 * @regs: MFI register set
480 static int
481 megasas_clear_intr_xscale(struct megasas_instance *instance)
483 u32 status;
484 u32 mfiStatus = 0;
485 struct megasas_register_set __iomem *regs;
486 regs = instance->reg_set;
489 * Check if it is our interrupt
491 status = readl(&regs->outbound_intr_status);
493 if (status & MFI_OB_INTR_STATUS_MASK)
494 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
495 if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
496 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
499 * Clear the interrupt by writing back the same value
501 if (mfiStatus)
502 writel(status, &regs->outbound_intr_status);
504 /* Dummy readl to force pci flush */
505 readl(&regs->outbound_intr_status);
507 return mfiStatus;
511 * megasas_fire_cmd_xscale - Sends command to the FW
512 * @frame_phys_addr : Physical address of cmd
513 * @frame_count : Number of frames for the command
514 * @regs : MFI register set
516 static inline void
517 megasas_fire_cmd_xscale(struct megasas_instance *instance,
518 dma_addr_t frame_phys_addr,
519 u32 frame_count,
520 struct megasas_register_set __iomem *regs)
522 unsigned long flags;
524 spin_lock_irqsave(&instance->hba_lock, flags);
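/* Post the command: the frame physical address, shifted right by 3, is OR'd with the frame count in the low bits */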
525 writel((frame_phys_addr >> 3)|(frame_count),
526 &(regs)->inbound_queue_port);
527 spin_unlock_irqrestore(&instance->hba_lock, flags);
531 * megasas_adp_reset_xscale - For controller reset
532 * @regs: MFI register set
534 static int
535 megasas_adp_reset_xscale(struct megasas_instance *instance,
536 struct megasas_register_set __iomem *regs)
538 u32 i;
539 u32 pcidata;
541 writel(MFI_ADP_RESET, &regs->inbound_doorbell);
543 for (i = 0; i < 3; i++)
544 msleep(1000); /* sleep for 3 secs */
545 pcidata = 0;
546 pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
547 dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
548 if (pcidata & 0x2) {
549 dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
550 pcidata &= ~0x2;
551 pci_write_config_dword(instance->pdev,
552 MFI_1068_PCSR_OFFSET, pcidata);
554 for (i = 0; i < 2; i++)
555 msleep(1000); /* need to wait 2 secs again */
557 pcidata = 0;
558 pci_read_config_dword(instance->pdev,
559 MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
560 dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
561 if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
562 dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
563 pcidata = 0;
564 pci_write_config_dword(instance->pdev,
565 MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
568 return 0;
572 * megasas_check_reset_xscale - For controller reset check
573 * @regs: MFI register set
575 static int
576 megasas_check_reset_xscale(struct megasas_instance *instance,
577 struct megasas_register_set __iomem *regs)
579 if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
580 (le32_to_cpu(*instance->consumer) ==
581 MEGASAS_ADPRESET_INPROG_SIGN))
582 return 1;
583 return 0;
586 static struct megasas_instance_template megasas_instance_template_xscale = {
588 .fire_cmd = megasas_fire_cmd_xscale,
589 .enable_intr = megasas_enable_intr_xscale,
590 .disable_intr = megasas_disable_intr_xscale,
591 .clear_intr = megasas_clear_intr_xscale,
592 .read_fw_status_reg = megasas_read_fw_status_reg_xscale,
593 .adp_reset = megasas_adp_reset_xscale,
594 .check_reset = megasas_check_reset_xscale,
595 .service_isr = megasas_isr,
596 .tasklet = megasas_complete_cmd_dpc,
597 .init_adapter = megasas_init_adapter_mfi,
598 .build_and_issue_cmd = megasas_build_and_issue_cmd,
599 .issue_dcmd = megasas_issue_dcmd,
603 * This is the end of set of functions & definitions specific
604 * to xscale (deviceid : 1064R, PERC5) controllers
608 * The following functions are defined for ppc (deviceid : 0x60)
609 * controllers
613 * megasas_enable_intr_ppc - Enables interrupts
614 * @regs: MFI register set
616 static inline void
617 megasas_enable_intr_ppc(struct megasas_instance *instance)
619 struct megasas_register_set __iomem *regs;
621 regs = instance->reg_set;
622 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
624 writel(~0x80000000, &(regs)->outbound_intr_mask);
626 /* Dummy readl to force pci flush */
627 readl(&regs->outbound_intr_mask);
631 * megasas_disable_intr_ppc - Disable interrupt
632 * @regs: MFI register set
634 static inline void
635 megasas_disable_intr_ppc(struct megasas_instance *instance)
637 struct megasas_register_set __iomem *regs;
638 u32 mask = 0xFFFFFFFF;
640 regs = instance->reg_set;
641 writel(mask, &regs->outbound_intr_mask);
642 /* Dummy readl to force pci flush */
643 readl(&regs->outbound_intr_mask);
647 * megasas_read_fw_status_reg_ppc - returns the current FW status value
648 * @regs: MFI register set
650 static u32
651 megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
653 return readl(&instance->reg_set->outbound_scratch_pad_0);
657 * megasas_clear_interrupt_ppc - Check & clear interrupt
658 * @regs: MFI register set
660 static int
661 megasas_clear_intr_ppc(struct megasas_instance *instance)
663 u32 status, mfiStatus = 0;
664 struct megasas_register_set __iomem *regs;
665 regs = instance->reg_set;
668 * Check if it is our interrupt
670 status = readl(&regs->outbound_intr_status);
672 if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
673 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
675 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
676 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
679 * Clear the interrupt by writing back the same value
681 writel(status, &regs->outbound_doorbell_clear);
683 /* Dummy readl to force pci flush */
684 readl(&regs->outbound_doorbell_clear);
686 return mfiStatus;
690 * megasas_fire_cmd_ppc - Sends command to the FW
691 * @frame_phys_addr : Physical address of cmd
692 * @frame_count : Number of frames for the command
693 * @regs : MFI register set
695 static inline void
696 megasas_fire_cmd_ppc(struct megasas_instance *instance,
697 dma_addr_t frame_phys_addr,
698 u32 frame_count,
699 struct megasas_register_set __iomem *regs)
701 unsigned long flags;
703 spin_lock_irqsave(&instance->hba_lock, flags);
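/* Post the command: the frame address is OR'd with (frame_count << 1) and bit 0 is set */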
704 writel((frame_phys_addr | (frame_count<<1))|1,
705 &(regs)->inbound_queue_port);
706 spin_unlock_irqrestore(&instance->hba_lock, flags);
710 * megasas_check_reset_ppc - For controller reset check
711 * @regs: MFI register set
713 static int
714 megasas_check_reset_ppc(struct megasas_instance *instance,
715 struct megasas_register_set __iomem *regs)
717 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
718 return 1;
720 return 0;
723 static struct megasas_instance_template megasas_instance_template_ppc = {
725 .fire_cmd = megasas_fire_cmd_ppc,
726 .enable_intr = megasas_enable_intr_ppc,
727 .disable_intr = megasas_disable_intr_ppc,
728 .clear_intr = megasas_clear_intr_ppc,
729 .read_fw_status_reg = megasas_read_fw_status_reg_ppc,
730 .adp_reset = megasas_adp_reset_xscale,
731 .check_reset = megasas_check_reset_ppc,
732 .service_isr = megasas_isr,
733 .tasklet = megasas_complete_cmd_dpc,
734 .init_adapter = megasas_init_adapter_mfi,
735 .build_and_issue_cmd = megasas_build_and_issue_cmd,
736 .issue_dcmd = megasas_issue_dcmd,
740 * megasas_enable_intr_skinny - Enables interrupts
741 * @regs: MFI register set
743 static inline void
744 megasas_enable_intr_skinny(struct megasas_instance *instance)
746 struct megasas_register_set __iomem *regs;
748 regs = instance->reg_set;
749 writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
751 writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
753 /* Dummy readl to force pci flush */
754 readl(&regs->outbound_intr_mask);
758 * megasas_disable_intr_skinny - Disables interrupt
759 * @regs: MFI register set
761 static inline void
762 megasas_disable_intr_skinny(struct megasas_instance *instance)
764 struct megasas_register_set __iomem *regs;
765 u32 mask = 0xFFFFFFFF;
767 regs = instance->reg_set;
768 writel(mask, &regs->outbound_intr_mask);
769 /* Dummy readl to force pci flush */
770 readl(&regs->outbound_intr_mask);
774 * megasas_read_fw_status_reg_skinny - returns the current FW status value
775 * @regs: MFI register set
777 static u32
778 megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
780 return readl(&instance->reg_set->outbound_scratch_pad_0);
784 * megasas_clear_interrupt_skinny - Check & clear interrupt
785 * @regs: MFI register set
787 static int
788 megasas_clear_intr_skinny(struct megasas_instance *instance)
790 u32 status;
791 u32 mfiStatus = 0;
792 struct megasas_register_set __iomem *regs;
793 regs = instance->reg_set;
796 * Check if it is our interrupt
798 status = readl(&regs->outbound_intr_status);
800 if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
801 return 0;
805 * Check if it is our interrupt
807 if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
808 MFI_STATE_FAULT) {
809 mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
810 } else
811 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
814 * Clear the interrupt by writing back the same value
816 writel(status, &regs->outbound_intr_status);
819 * dummy read to flush PCI
821 readl(&regs->outbound_intr_status);
823 return mfiStatus;
827 * megasas_fire_cmd_skinny - Sends command to the FW
828 * @frame_phys_addr : Physical address of cmd
829 * @frame_count : Number of frames for the command
830 * @regs : MFI register set
832 static inline void
833 megasas_fire_cmd_skinny(struct megasas_instance *instance,
834 dma_addr_t frame_phys_addr,
835 u32 frame_count,
836 struct megasas_register_set __iomem *regs)
838 unsigned long flags;
840 spin_lock_irqsave(&instance->hba_lock, flags);
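/* The 64-bit frame address is split across the high and low inbound queue ports; the low word also carries (frame_count << 1) with bit 0 set */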
841 writel(upper_32_bits(frame_phys_addr),
842 &(regs)->inbound_high_queue_port);
843 writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
844 &(regs)->inbound_low_queue_port);
845 spin_unlock_irqrestore(&instance->hba_lock, flags);
849 * megasas_check_reset_skinny - For controller reset check
850 * @regs: MFI register set
852 static int
853 megasas_check_reset_skinny(struct megasas_instance *instance,
854 struct megasas_register_set __iomem *regs)
856 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
857 return 1;
859 return 0;
862 static struct megasas_instance_template megasas_instance_template_skinny = {
864 .fire_cmd = megasas_fire_cmd_skinny,
865 .enable_intr = megasas_enable_intr_skinny,
866 .disable_intr = megasas_disable_intr_skinny,
867 .clear_intr = megasas_clear_intr_skinny,
868 .read_fw_status_reg = megasas_read_fw_status_reg_skinny,
869 .adp_reset = megasas_adp_reset_gen2,
870 .check_reset = megasas_check_reset_skinny,
871 .service_isr = megasas_isr,
872 .tasklet = megasas_complete_cmd_dpc,
873 .init_adapter = megasas_init_adapter_mfi,
874 .build_and_issue_cmd = megasas_build_and_issue_cmd,
875 .issue_dcmd = megasas_issue_dcmd,
880 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
881 * controllers
885 * megasas_enable_intr_gen2 - Enables interrupts
886 * @regs: MFI register set
888 static inline void
889 megasas_enable_intr_gen2(struct megasas_instance *instance)
891 struct megasas_register_set __iomem *regs;
893 regs = instance->reg_set;
894 writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
896 /* write ~0x00000005 (4 & 1) to the intr mask*/
897 writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
899 /* Dummy readl to force pci flush */
900 readl(&regs->outbound_intr_mask);
904 * megasas_disable_intr_gen2 - Disables interrupt
905 * @regs: MFI register set
907 static inline void
908 megasas_disable_intr_gen2(struct megasas_instance *instance)
910 struct megasas_register_set __iomem *regs;
911 u32 mask = 0xFFFFFFFF;
913 regs = instance->reg_set;
914 writel(mask, &regs->outbound_intr_mask);
915 /* Dummy readl to force pci flush */
916 readl(&regs->outbound_intr_mask);
920 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
921 * @regs: MFI register set
923 static u32
924 megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
926 return readl(&instance->reg_set->outbound_scratch_pad_0);
930 * megasas_clear_interrupt_gen2 - Check & clear interrupt
931 * @regs: MFI register set
933 static int
934 megasas_clear_intr_gen2(struct megasas_instance *instance)
936 u32 status;
937 u32 mfiStatus = 0;
938 struct megasas_register_set __iomem *regs;
939 regs = instance->reg_set;
942 * Check if it is our interrupt
944 status = readl(&regs->outbound_intr_status);
946 if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
947 mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
949 if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
950 mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
954 * Clear the interrupt by writing back the same value
956 if (mfiStatus)
957 writel(status, &regs->outbound_doorbell_clear);
959 /* Dummy readl to force pci flush */
960 readl(&regs->outbound_intr_status);
962 return mfiStatus;
965 * megasas_fire_cmd_gen2 - Sends command to the FW
966 * @frame_phys_addr : Physical address of cmd
967 * @frame_count : Number of frames for the command
968 * @regs : MFI register set
970 static inline void
971 megasas_fire_cmd_gen2(struct megasas_instance *instance,
972 dma_addr_t frame_phys_addr,
973 u32 frame_count,
974 struct megasas_register_set __iomem *regs)
976 unsigned long flags;
978 spin_lock_irqsave(&instance->hba_lock, flags);
979 writel((frame_phys_addr | (frame_count<<1))|1,
980 &(regs)->inbound_queue_port);
981 spin_unlock_irqrestore(&instance->hba_lock, flags);
985 * megasas_adp_reset_gen2 - For controller reset
986 * @regs: MFI register set
988 static int
989 megasas_adp_reset_gen2(struct megasas_instance *instance,
990 struct megasas_register_set __iomem *reg_set)
992 u32 retry = 0 ;
993 u32 HostDiag;
994 u32 __iomem *seq_offset = &reg_set->seq_offset;
995 u32 __iomem *hostdiag_offset = &reg_set->host_diag;
997 if (instance->instancet == &megasas_instance_template_skinny) {
998 seq_offset = &reg_set->fusion_seq_offset;
999 hostdiag_offset = &reg_set->fusion_host_diag;
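/* Write what appears to be the diag-unlock key sequence; the loop below then waits for DIAG_WRITE_ENABLE in the host diag register */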
1002 writel(0, seq_offset);
1003 writel(4, seq_offset);
1004 writel(0xb, seq_offset);
1005 writel(2, seq_offset);
1006 writel(7, seq_offset);
1007 writel(0xd, seq_offset);
1009 msleep(1000);
1011 HostDiag = (u32)readl(hostdiag_offset);
1013 while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1014 msleep(100);
1015 HostDiag = (u32)readl(hostdiag_offset);
1016 dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1017 retry, HostDiag);
1019 if (retry++ >= 100)
1020 return 1;
1024 dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1026 writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1028 ssleep(10);
1030 HostDiag = (u32)readl(hostdiag_offset);
1031 while (HostDiag & DIAG_RESET_ADAPTER) {
1032 msleep(100);
1033 HostDiag = (u32)readl(hostdiag_offset);
1034 dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1035 retry, HostDiag);
1037 if (retry++ >= 1000)
1038 return 1;
1041 return 0;
1045 * megasas_check_reset_gen2 - For controller reset check
1046 * @regs: MFI register set
1048 static int
1049 megasas_check_reset_gen2(struct megasas_instance *instance,
1050 struct megasas_register_set __iomem *regs)
1052 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1053 return 1;
1055 return 0;
1058 static struct megasas_instance_template megasas_instance_template_gen2 = {
1060 .fire_cmd = megasas_fire_cmd_gen2,
1061 .enable_intr = megasas_enable_intr_gen2,
1062 .disable_intr = megasas_disable_intr_gen2,
1063 .clear_intr = megasas_clear_intr_gen2,
1064 .read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1065 .adp_reset = megasas_adp_reset_gen2,
1066 .check_reset = megasas_check_reset_gen2,
1067 .service_isr = megasas_isr,
1068 .tasklet = megasas_complete_cmd_dpc,
1069 .init_adapter = megasas_init_adapter_mfi,
1070 .build_and_issue_cmd = megasas_build_and_issue_cmd,
1071 .issue_dcmd = megasas_issue_dcmd,
1075 * This is the end of set of functions & definitions
1076 * specific to gen2 (deviceid : 0x78, 0x79) controllers
1080 * Template added for TB (Fusion)
1082 extern struct megasas_instance_template megasas_instance_template_fusion;
1085 * megasas_issue_polled - Issues a polling command
1086 * @instance: Adapter soft state
1087 * @cmd: Command packet to be issued
1089 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1092 megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1094 struct megasas_header *frame_hdr = &cmd->frame->hdr;
1096 frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1097 frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1099 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1100 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1101 __func__, __LINE__);
1102 return DCMD_INIT;
1105 instance->instancet->issue_dcmd(instance, cmd);
1107 return wait_and_poll(instance, cmd, instance->requestorId ?
1108 MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1112 * megasas_issue_blocked_cmd - Synchronous wrapper around regular FW cmds
1113 * @instance: Adapter soft state
1114 * @cmd: Command to be issued
1115 * @timeout: Timeout in seconds
1117 * This function waits on an event for the command to be returned from ISR.
1118 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1119 * Used to issue ioctl commands.
1122 megasas_issue_blocked_cmd(struct megasas_instance *instance,
1123 struct megasas_cmd *cmd, int timeout)
1125 int ret = 0;
1126 cmd->cmd_status_drv = DCMD_INIT;
1128 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1129 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1130 __func__, __LINE__);
1131 return DCMD_INIT;
1134 instance->instancet->issue_dcmd(instance, cmd);
1136 if (timeout) {
1137 ret = wait_event_timeout(instance->int_cmd_wait_q,
1138 cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1139 if (!ret) {
1140 dev_err(&instance->pdev->dev,
1141 "DCMD(opcode: 0x%x) is timed out, func:%s\n",
1142 cmd->frame->dcmd.opcode, __func__);
1143 return DCMD_TIMEOUT;
1145 } else
1146 wait_event(instance->int_cmd_wait_q,
1147 cmd->cmd_status_drv != DCMD_INIT);
1149 return cmd->cmd_status_drv;
1153 * megasas_issue_blocked_abort_cmd - Aborts previously issued cmd
1154 * @instance: Adapter soft state
1155 * @cmd_to_abort: Previously issued cmd to be aborted
1156 * @timeout: Timeout in seconds
1158 * MFI firmware can abort a previously issued AEN command (automatic event
1159 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1160 * cmd and waits for return status.
1161 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1163 static int
1164 megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1165 struct megasas_cmd *cmd_to_abort, int timeout)
1167 struct megasas_cmd *cmd;
1168 struct megasas_abort_frame *abort_fr;
1169 int ret = 0;
1170 u32 opcode;
1172 cmd = megasas_get_cmd(instance);
1174 if (!cmd)
1175 return -1;
1177 abort_fr = &cmd->frame->abort;
1180 * Prepare and issue the abort frame
1182 abort_fr->cmd = MFI_CMD_ABORT;
1183 abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1184 abort_fr->flags = cpu_to_le16(0);
1185 abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1186 abort_fr->abort_mfi_phys_addr_lo =
1187 cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1188 abort_fr->abort_mfi_phys_addr_hi =
1189 cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1191 cmd->sync_cmd = 1;
1192 cmd->cmd_status_drv = DCMD_INIT;
1194 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1195 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1196 __func__, __LINE__);
1197 return DCMD_INIT;
1200 instance->instancet->issue_dcmd(instance, cmd);
1202 if (timeout) {
1203 ret = wait_event_timeout(instance->abort_cmd_wait_q,
1204 cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1205 if (!ret) {
1206 opcode = cmd_to_abort->frame->dcmd.opcode;
1207 dev_err(&instance->pdev->dev,
1208 "Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1209 opcode, __func__);
1210 return DCMD_TIMEOUT;
1212 } else
1213 wait_event(instance->abort_cmd_wait_q,
1214 cmd->cmd_status_drv != DCMD_INIT);
1216 cmd->sync_cmd = 0;
1218 megasas_return_cmd(instance, cmd);
1219 return cmd->cmd_status_drv;
1223 * megasas_make_sgl32 - Prepares 32-bit SGL
1224 * @instance: Adapter soft state
1225 * @scp: SCSI command from the mid-layer
1226 * @mfi_sgl: SGL to be filled in
1228 * If successful, this function returns the number of SG elements. Otherwise,
1229 * it returns -1.
1231 static int
1232 megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1233 union megasas_sgl *mfi_sgl)
1235 int i;
1236 int sge_count;
1237 struct scatterlist *os_sgl;
1239 sge_count = scsi_dma_map(scp);
1240 BUG_ON(sge_count < 0);
1242 if (sge_count) {
1243 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1244 mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1245 mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1248 return sge_count;
1252 * megasas_make_sgl64 - Prepares 64-bit SGL
1253 * @instance: Adapter soft state
1254 * @scp: SCSI command from the mid-layer
1255 * @mfi_sgl: SGL to be filled in
1257 * If successful, this function returns the number of SG elements. Otherwise,
1258 * it returns -1.
1260 static int
1261 megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1262 union megasas_sgl *mfi_sgl)
1264 int i;
1265 int sge_count;
1266 struct scatterlist *os_sgl;
1268 sge_count = scsi_dma_map(scp);
1269 BUG_ON(sge_count < 0);
1271 if (sge_count) {
1272 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1273 mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1274 mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1277 return sge_count;
1281 * megasas_make_sgl_skinny - Prepares IEEE SGL
1282 * @instance: Adapter soft state
1283 * @scp: SCSI command from the mid-layer
1284 * @mfi_sgl: SGL to be filled in
1286 * If successful, this function returns the number of SG elements. Otherwise,
1287 * it returns -1.
1289 static int
1290 megasas_make_sgl_skinny(struct megasas_instance *instance,
1291 struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1293 int i;
1294 int sge_count;
1295 struct scatterlist *os_sgl;
1297 sge_count = scsi_dma_map(scp);
1299 if (sge_count) {
1300 scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1301 mfi_sgl->sge_skinny[i].length =
1302 cpu_to_le32(sg_dma_len(os_sgl));
1303 mfi_sgl->sge_skinny[i].phys_addr =
1304 cpu_to_le64(sg_dma_address(os_sgl));
1305 mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1308 return sge_count;
1312 * megasas_get_frame_count - Computes the number of frames
1313 * @frame_type : type of frame- io or pthru frame
1314 * @sge_count : number of sg elements
1316 * Returns the number of frames required for the given number of sge's (sge_count)
1319 static u32 megasas_get_frame_count(struct megasas_instance *instance,
1320 u8 sge_count, u8 frame_type)
1322 int num_cnt;
1323 int sge_bytes;
1324 u32 sge_sz;
1325 u32 frame_count = 0;
1327 sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1328 sizeof(struct megasas_sge32);
1330 if (instance->flag_ieee) {
1331 sge_sz = sizeof(struct megasas_sge_skinny);
1335 * Main frame can contain 2 SGEs for 64-bit SGLs and
1336 * 3 SGEs for 32-bit SGLs for ldio &
1337 * 1 SGEs for 64-bit SGLs and
1338 * 2 SGEs for 32-bit SGLs for pthru frame
1340 if (unlikely(frame_type == PTHRU_FRAME)) {
1341 if (instance->flag_ieee == 1) {
1342 num_cnt = sge_count - 1;
1343 } else if (IS_DMA64)
1344 num_cnt = sge_count - 1;
1345 else
1346 num_cnt = sge_count - 2;
1347 } else {
1348 if (instance->flag_ieee == 1) {
1349 num_cnt = sge_count - 1;
1350 } else if (IS_DMA64)
1351 num_cnt = sge_count - 2;
1352 else
1353 num_cnt = sge_count - 3;
1356 if (num_cnt > 0) {
1357 sge_bytes = sge_sz * num_cnt;
1359 frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1360 ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1362 /* Main frame */
1363 frame_count += 1;
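/* Cap the total: a single MFI command is not allowed to span more than 8 frames here */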
1365 if (frame_count > 7)
1366 frame_count = 8;
1367 return frame_count;
1371 * megasas_build_dcdb - Prepares a direct cdb (DCDB) command
1372 * @instance: Adapter soft state
1373 * @scp: SCSI command
1374 * @cmd: Command to be prepared in
1376 * This function prepares CDB commands. These are typically pass-through
1377 * commands to the devices.
1379 static int
1380 megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1381 struct megasas_cmd *cmd)
1383 u32 is_logical;
1384 u32 device_id;
1385 u16 flags = 0;
1386 struct megasas_pthru_frame *pthru;
1388 is_logical = MEGASAS_IS_LOGICAL(scp->device);
1389 device_id = MEGASAS_DEV_INDEX(scp);
1390 pthru = (struct megasas_pthru_frame *)cmd->frame;
1392 if (scp->sc_data_direction == DMA_TO_DEVICE)
1393 flags = MFI_FRAME_DIR_WRITE;
1394 else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1395 flags = MFI_FRAME_DIR_READ;
1396 else if (scp->sc_data_direction == DMA_NONE)
1397 flags = MFI_FRAME_DIR_NONE;
1399 if (instance->flag_ieee == 1) {
1400 flags |= MFI_FRAME_IEEE;
1404 * Prepare the DCDB frame
1406 pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1407 pthru->cmd_status = 0x0;
1408 pthru->scsi_status = 0x0;
1409 pthru->target_id = device_id;
1410 pthru->lun = scp->device->lun;
1411 pthru->cdb_len = scp->cmd_len;
1412 pthru->timeout = 0;
1413 pthru->pad_0 = 0;
1414 pthru->flags = cpu_to_le16(flags);
1415 pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1417 memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1420 * If the command is for the tape device, set the
1421 * pthru timeout to the os layer timeout value.
1423 if (scp->device->type == TYPE_TAPE) {
1424 if ((scp->request->timeout / HZ) > 0xFFFF)
1425 pthru->timeout = cpu_to_le16(0xFFFF);
1426 else
1427 pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1431 * Construct SGL
1433 if (instance->flag_ieee == 1) {
1434 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1435 pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1436 &pthru->sgl);
1437 } else if (IS_DMA64) {
1438 pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1439 pthru->sge_count = megasas_make_sgl64(instance, scp,
1440 &pthru->sgl);
1441 } else
1442 pthru->sge_count = megasas_make_sgl32(instance, scp,
1443 &pthru->sgl);
1445 if (pthru->sge_count > instance->max_num_sge) {
1446 dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1447 pthru->sge_count);
1448 return 0;
1452 * Sense info specific
1454 pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1455 pthru->sense_buf_phys_addr_hi =
1456 cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1457 pthru->sense_buf_phys_addr_lo =
1458 cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1461 * Compute the total number of frames this command consumes. FW uses
1462 * this number to pull sufficient number of frames from host memory.
1464 cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1465 PTHRU_FRAME);
1467 return cmd->frame_count;
1471 * megasas_build_ldio - Prepares IOs to logical devices
1472 * @instance: Adapter soft state
1473 * @scp: SCSI command
1474 * @cmd: Command to be prepared
1476 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1478 static int
1479 megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1480 struct megasas_cmd *cmd)
1482 u32 device_id;
1483 u8 sc = scp->cmnd[0];
1484 u16 flags = 0;
1485 struct megasas_io_frame *ldio;
1487 device_id = MEGASAS_DEV_INDEX(scp);
1488 ldio = (struct megasas_io_frame *)cmd->frame;
1490 if (scp->sc_data_direction == DMA_TO_DEVICE)
1491 flags = MFI_FRAME_DIR_WRITE;
1492 else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1493 flags = MFI_FRAME_DIR_READ;
1495 if (instance->flag_ieee == 1) {
1496 flags |= MFI_FRAME_IEEE;
1500 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1502 ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1503 ldio->cmd_status = 0x0;
1504 ldio->scsi_status = 0x0;
1505 ldio->target_id = device_id;
1506 ldio->timeout = 0;
1507 ldio->reserved_0 = 0;
1508 ldio->pad_0 = 0;
1509 ldio->flags = cpu_to_le16(flags);
1510 ldio->start_lba_hi = 0;
1511 ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1514 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1516 if (scp->cmd_len == 6) {
1517 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1518 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1519 ((u32) scp->cmnd[2] << 8) |
1520 (u32) scp->cmnd[3]);
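/* 6-byte CDBs carry only a 21-bit LBA, so keep just the low 21 bits */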
1522 ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1526 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1528 else if (scp->cmd_len == 10) {
1529 ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1530 ((u32) scp->cmnd[7] << 8));
1531 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1532 ((u32) scp->cmnd[3] << 16) |
1533 ((u32) scp->cmnd[4] << 8) |
1534 (u32) scp->cmnd[5]);
1538 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1540 else if (scp->cmd_len == 12) {
1541 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1542 ((u32) scp->cmnd[7] << 16) |
1543 ((u32) scp->cmnd[8] << 8) |
1544 (u32) scp->cmnd[9]);
1546 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1547 ((u32) scp->cmnd[3] << 16) |
1548 ((u32) scp->cmnd[4] << 8) |
1549 (u32) scp->cmnd[5]);
1553 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1555 else if (scp->cmd_len == 16) {
1556 ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1557 ((u32) scp->cmnd[11] << 16) |
1558 ((u32) scp->cmnd[12] << 8) |
1559 (u32) scp->cmnd[13]);
1561 ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1562 ((u32) scp->cmnd[7] << 16) |
1563 ((u32) scp->cmnd[8] << 8) |
1564 (u32) scp->cmnd[9]);
1566 ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1567 ((u32) scp->cmnd[3] << 16) |
1568 ((u32) scp->cmnd[4] << 8) |
1569 (u32) scp->cmnd[5]);
1574 * Construct SGL
1576 if (instance->flag_ieee) {
1577 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1578 ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1579 &ldio->sgl);
1580 } else if (IS_DMA64) {
1581 ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1582 ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1583 } else
1584 ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1586 if (ldio->sge_count > instance->max_num_sge) {
1587 dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1588 ldio->sge_count);
1589 return 0;
1593 * Sense info specific
1595 ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1596 ldio->sense_buf_phys_addr_hi = 0;
1597 ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1600 * Compute the total number of frames this command consumes. FW uses
1601 * this number to pull sufficient number of frames from host memory.
1603 cmd->frame_count = megasas_get_frame_count(instance,
1604 ldio->sge_count, IO_FRAME);
1606 return cmd->frame_count;
1610 * megasas_cmd_type - Checks if the cmd is for logical drive/sysPD
1611 * and whether it's RW or non RW
1612 * @scmd: SCSI command
1615 inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1617 int ret;
1619 switch (cmd->cmnd[0]) {
1620 case READ_10:
1621 case WRITE_10:
1622 case READ_12:
1623 case WRITE_12:
1624 case READ_6:
1625 case WRITE_6:
1626 case READ_16:
1627 case WRITE_16:
1628 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1629 READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1630 break;
1631 default:
1632 ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1633 NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1635 return ret;
1639 * megasas_dump_pending_frames - Dumps the frame address of all pending cmds
1640 * in FW
1641 * @instance: Adapter soft state
1643 static inline void
1644 megasas_dump_pending_frames(struct megasas_instance *instance)
1646 struct megasas_cmd *cmd;
1647 int i,n;
1648 union megasas_sgl *mfi_sgl;
1649 struct megasas_io_frame *ldio;
1650 struct megasas_pthru_frame *pthru;
1651 u32 sgcount;
1652 u16 max_cmd = instance->max_fw_cmds;
1654 dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1655 dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1656 if (IS_DMA64)
1657 dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1658 else
1659 dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1661 dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1662 for (i = 0; i < max_cmd; i++) {
1663 cmd = instance->cmd_list[i];
1664 if (!cmd->scmd)
1665 continue;
1666 dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1667 if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1668 ldio = (struct megasas_io_frame *)cmd->frame;
1669 mfi_sgl = &ldio->sgl;
1670 sgcount = ldio->sge_count;
1671 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1672 " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1673 instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1674 le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1675 le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1676 } else {
1677 pthru = (struct megasas_pthru_frame *) cmd->frame;
1678 mfi_sgl = &pthru->sgl;
1679 sgcount = pthru->sge_count;
1680 dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1681 "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1682 instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1683 pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1684 le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1686 if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1687 for (n = 0; n < sgcount; n++) {
1688 if (IS_DMA64)
1689 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1690 le32_to_cpu(mfi_sgl->sge64[n].length),
1691 le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1692 else
1693 dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1694 le32_to_cpu(mfi_sgl->sge32[n].length),
1695 le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1698 } /*for max_cmd*/
1699 dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1700 for (i = 0; i < max_cmd; i++) {
1702 cmd = instance->cmd_list[i];
1704 if (cmd->sync_cmd == 1)
1705 dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1707 dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1711 megasas_build_and_issue_cmd(struct megasas_instance *instance,
1712 struct scsi_cmnd *scmd)
1714 struct megasas_cmd *cmd;
1715 u32 frame_count;
1717 cmd = megasas_get_cmd(instance);
1718 if (!cmd)
1719 return SCSI_MLQUEUE_HOST_BUSY;
1722 * Logical drive command
1724 if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1725 frame_count = megasas_build_ldio(instance, scmd, cmd);
1726 else
1727 frame_count = megasas_build_dcdb(instance, scmd, cmd);
1729 if (!frame_count)
1730 goto out_return_cmd;
1732 cmd->scmd = scmd;
1733 scmd->SCp.ptr = (char *)cmd;
1736 * Issue the command to the FW
1738 atomic_inc(&instance->fw_outstanding);
1740 instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1741 cmd->frame_count-1, instance->reg_set);
1743 return 0;
1744 out_return_cmd:
1745 megasas_return_cmd(instance, cmd);
1746 return SCSI_MLQUEUE_HOST_BUSY;
1751 * megasas_queue_command - Queue entry point
1752 * @scmd: SCSI command to be queued
1753 * @done: Callback entry point
1755 static int
1756 megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1758 struct megasas_instance *instance;
1759 struct MR_PRIV_DEVICE *mr_device_priv_data;
1761 instance = (struct megasas_instance *)
1762 scmd->device->host->hostdata;
1764 if (instance->unload == 1) {
1765 scmd->result = DID_NO_CONNECT << 16;
1766 scmd->scsi_done(scmd);
1767 return 0;
1770 if (instance->issuepend_done == 0)
1771 return SCSI_MLQUEUE_HOST_BUSY;
1774 /* Check for an mpio path and adjust behavior */
1775 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1776 if (megasas_check_mpio_paths(instance, scmd) ==
1777 (DID_REQUEUE << 16)) {
1778 return SCSI_MLQUEUE_HOST_BUSY;
1779 } else {
1780 scmd->result = DID_NO_CONNECT << 16;
1781 scmd->scsi_done(scmd);
1782 return 0;
1786 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1787 scmd->result = DID_NO_CONNECT << 16;
1788 scmd->scsi_done(scmd);
1789 return 0;
1792 mr_device_priv_data = scmd->device->hostdata;
1793 if (!mr_device_priv_data) {
1794 scmd->result = DID_NO_CONNECT << 16;
1795 scmd->scsi_done(scmd);
1796 return 0;
1799 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1800 return SCSI_MLQUEUE_HOST_BUSY;
1802 if (mr_device_priv_data->tm_busy)
1803 return SCSI_MLQUEUE_DEVICE_BUSY;
1806 scmd->result = 0;
1808 if (MEGASAS_IS_LOGICAL(scmd->device) &&
1809 (scmd->device->id >= instance->fw_supported_vd_count ||
1810 scmd->device->lun)) {
1811 scmd->result = DID_BAD_TARGET << 16;
1812 goto out_done;
1815 if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1816 MEGASAS_IS_LOGICAL(scmd->device) &&
1817 (!instance->fw_sync_cache_support)) {
1818 scmd->result = DID_OK << 16;
1819 goto out_done;
1822 return instance->instancet->build_and_issue_cmd(instance, scmd);
1824 out_done:
1825 scmd->scsi_done(scmd);
1826 return 0;
1829 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1831 int i;
1833 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1835 if ((megasas_mgmt_info.instance[i]) &&
1836 (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1837 return megasas_mgmt_info.instance[i];
1840 return NULL;
1844 * megasas_set_dynamic_target_properties -
1845 * Device properties set by the driver may not be static and need to be
1846 * updated after OCR.
1848 * set tm_capable.
1849 * set dma alignment (only for eedp protection enable vd).
1851 * @sdev: OS provided scsi device
1853 * Returns void
1855 void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1856 bool is_target_prop)
1858 u16 pd_index = 0, ld;
1859 u32 device_id;
1860 struct megasas_instance *instance;
1861 struct fusion_context *fusion;
1862 struct MR_PRIV_DEVICE *mr_device_priv_data;
1863 struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1864 struct MR_LD_RAID *raid;
1865 struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1867 instance = megasas_lookup_instance(sdev->host->host_no);
1868 fusion = instance->ctrl_context;
1869 mr_device_priv_data = sdev->hostdata;
1871 if (!fusion || !mr_device_priv_data)
1872 return;
1874 if (MEGASAS_IS_LOGICAL(sdev)) {
1875 device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1876 + sdev->id;
1877 local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1878 ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1879 if (ld >= instance->fw_supported_vd_count)
1880 return;
1881 raid = MR_LdRaidGet(ld, local_map_ptr);
1883 if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1884 blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1886 mr_device_priv_data->is_tm_capable =
1887 raid->capability.tmCapable;
1889 if (!raid->flags.isEPD)
1890 sdev->no_write_same = 1;
1892 } else if (instance->use_seqnum_jbod_fp) {
1893 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1894 sdev->id;
1895 pd_sync = (void *)fusion->pd_seq_sync
1896 [(instance->pd_seq_map_id - 1) & 1];
1897 mr_device_priv_data->is_tm_capable =
1898 pd_sync->seq[pd_index].capability.tmCapable;
1901 if (is_target_prop && instance->tgt_prop->reset_tmo) {
1903 * If FW provides a target reset timeout value, driver will use
1904 * it. If not set, fallback to default values.
1906 mr_device_priv_data->target_reset_tmo =
1907 min_t(u8, instance->max_reset_tmo,
1908 instance->tgt_prop->reset_tmo);
1909 mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1910 } else {
1911 mr_device_priv_data->target_reset_tmo =
1912 MEGASAS_DEFAULT_TM_TIMEOUT;
1913 mr_device_priv_data->task_abort_tmo =
1914 MEGASAS_DEFAULT_TM_TIMEOUT;
1919 * megasas_set_nvme_device_properties -
1920 * set nomerges=2
1921 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1922 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1924 * MR firmware provides the value in KB. The caller of this function
1925 * converts KB into bytes.
1927 * e.g. MDTS=5 means 2^5 * NVMe page size. (In the case of a 4K page size,
1928 * MR firmware provides the value 128, since 32 * 4K = 128K.)
1930 * @sdev: scsi device
1931 * @max_io_size: maximum io transfer size
1934 static inline void
1935 megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1937 struct megasas_instance *instance;
1938 u32 mr_nvme_pg_size;
1940 instance = (struct megasas_instance *)sdev->host->hostdata;
1941 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1942 MR_DEFAULT_NVME_PAGE_SIZE);
1944 blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1946 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1947 blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
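/*
 * Worked example (illustrative): for an NVMe device with MDTS = 5 and a 4K
 * NVMe page size, firmware reports max_io_size_kb = 32 * 4 = 128. The caller
 * passes 128 << 10 = 131072 bytes, so the call above sets max_hw_sectors to
 * 131072 / 512 = 256 and the virt boundary to 4K - 1.
 */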
1951 * megasas_set_fw_assisted_qd -
1952 * set device queue depth to can_queue when enable_sdev_max_qd is set,
1953 * otherwise to the fw assisted queue depth
1955 * @sdev: scsi device
1956 * @is_target_prop: true if FW provided target properties.
1958 static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1959 bool is_target_prop)
1961 u8 interface_type;
1962 u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1963 u32 tgt_device_qd;
1964 struct megasas_instance *instance;
1965 struct MR_PRIV_DEVICE *mr_device_priv_data;
1967 instance = megasas_lookup_instance(sdev->host->host_no);
1968 mr_device_priv_data = sdev->hostdata;
1969 interface_type = mr_device_priv_data->interface_type;
1971 switch (interface_type) {
1972 case SAS_PD:
1973 device_qd = MEGASAS_SAS_QD;
1974 break;
1975 case SATA_PD:
1976 device_qd = MEGASAS_SATA_QD;
1977 break;
1978 case NVME_PD:
1979 device_qd = MEGASAS_NVME_QD;
1980 break;
1983 if (is_target_prop) {
1984 tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
1985 if (tgt_device_qd &&
1986 (tgt_device_qd <= instance->host->can_queue))
1987 device_qd = tgt_device_qd;
1990 if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
1991 device_qd = instance->host->can_queue;
1993 scsi_change_queue_depth(sdev, device_qd);
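/*
 * Illustrative example (not part of the driver logic): a SATA PD starts at
 * MEGASAS_SATA_QD; if firmware reports device_qdepth = 64 and
 * 64 <= host->can_queue, the depth becomes 64; with enable_sdev_max_qd set,
 * it is raised to host->can_queue instead.
 */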
1997 * megasas_set_static_target_properties -
1998 * Device properties set by the driver are static and do not need to be
1999 * updated after OCR.
2001 * set io timeout
2002 * set device queue depth
2003 * set nvme device properties. see - megasas_set_nvme_device_properties
2005 * @sdev: scsi device
2006 * @is_target_prop: true if FW provided target properties.
2008 static void megasas_set_static_target_properties(struct scsi_device *sdev,
2009 bool is_target_prop)
2011 u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2012 struct megasas_instance *instance;
2014 instance = megasas_lookup_instance(sdev->host->host_no);
2017 * The RAID firmware may require extended timeouts.
2019 blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2021 /* max_io_size_kb will be set to a non-zero value for
2022 * NVMe-based VDs and system PDs.
2024 if (is_target_prop)
2025 max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
2027 if (instance->nvme_page_size && max_io_size_kb)
2028 megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2030 megasas_set_fw_assisted_qd(sdev, is_target_prop);
2034 static int megasas_slave_configure(struct scsi_device *sdev)
2036 u16 pd_index = 0;
2037 struct megasas_instance *instance;
2038 int ret_target_prop = DCMD_FAILED;
2039 bool is_target_prop = false;
2041 instance = megasas_lookup_instance(sdev->host->host_no);
2042 if (instance->pd_list_not_supported) {
2043 if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2044 pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2045 sdev->id;
2046 if (instance->pd_list[pd_index].driveState !=
2047 MR_PD_STATE_SYSTEM)
2048 return -ENXIO;
2052 mutex_lock(&instance->reset_mutex);
2053 /* Send DCMD to Firmware and cache the information */
2054 if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2055 megasas_get_pd_info(instance, sdev);
2057 /* Some Ventura firmware may not have instance->nvme_page_size set.
2058 * Do not send MR_DCMD_DRV_GET_TARGET_PROP in that case.
2060 if ((instance->tgt_prop) && (instance->nvme_page_size))
2061 ret_target_prop = megasas_get_target_prop(instance, sdev);
2063 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2064 megasas_set_static_target_properties(sdev, is_target_prop);
2066 /* This sdev property may change post OCR */
2067 megasas_set_dynamic_target_properties(sdev, is_target_prop);
2069 mutex_unlock(&instance->reset_mutex);
2071 return 0;
2074 static int megasas_slave_alloc(struct scsi_device *sdev)
2076 u16 pd_index = 0;
2077 struct megasas_instance *instance;
2078 struct MR_PRIV_DEVICE *mr_device_priv_data;
2080 instance = megasas_lookup_instance(sdev->host->host_no);
2081 if (!MEGASAS_IS_LOGICAL(sdev)) {
2083 * Open the OS scan to the SYSTEM PD
2085 pd_index =
2086 (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2087 sdev->id;
2088 if ((instance->pd_list_not_supported ||
2089 instance->pd_list[pd_index].driveState ==
2090 MR_PD_STATE_SYSTEM)) {
2091 goto scan_target;
2093 return -ENXIO;
2096 scan_target:
2097 mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2098 GFP_KERNEL);
2099 if (!mr_device_priv_data)
2100 return -ENOMEM;
2101 sdev->hostdata = mr_device_priv_data;
2103 atomic_set(&mr_device_priv_data->r1_ldio_hint,
2104 instance->r1_ldio_hint_default);
2105 return 0;
2108 static void megasas_slave_destroy(struct scsi_device *sdev)
2110 kfree(sdev->hostdata);
2111 sdev->hostdata = NULL;
2115 * megasas_complete_outstanding_ioctls - Complete outstanding ioctls after
2116 * the adapter is killed
2117 * @instance: Adapter soft state
2120 static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2122 int i;
2123 struct megasas_cmd *cmd_mfi;
2124 struct megasas_cmd_fusion *cmd_fusion;
2125 struct fusion_context *fusion = instance->ctrl_context;
2127 /* Find all outstanding ioctls */
2128 if (fusion) {
2129 for (i = 0; i < instance->max_fw_cmds; i++) {
2130 cmd_fusion = fusion->cmd_list[i];
2131 if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2132 cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2133 if (cmd_mfi->sync_cmd &&
2134 (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2135 cmd_mfi->frame->hdr.cmd_status =
2136 MFI_STAT_WRONG_STATE;
2137 megasas_complete_cmd(instance,
2138 cmd_mfi, DID_OK);
2142 } else {
2143 for (i = 0; i < instance->max_fw_cmds; i++) {
2144 cmd_mfi = instance->cmd_list[i];
2145 if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2146 MFI_CMD_ABORT)
2147 megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2153 void megaraid_sas_kill_hba(struct megasas_instance *instance)
2155 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2156 dev_warn(&instance->pdev->dev,
2157 "Adapter already dead, skipping kill HBA\n");
2158 return;
2161 /* Set critical error to block I/O & ioctls in case caller didn't */
2162 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2163 /* Wait 1 second to ensure IOs or ioctls being built have posted */
2164 msleep(1000);
2165 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2166 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2167 (instance->adapter_type != MFI_SERIES)) {
2168 if (!instance->requestorId) {
2169 writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2170 /* Flush */
2171 readl(&instance->reg_set->doorbell);
2173 if (instance->requestorId && instance->peerIsPresent)
2174 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2175 } else {
2176 writel(MFI_STOP_ADP,
2177 &instance->reg_set->inbound_doorbell);
2179 /* Complete outstanding ioctls when adapter is killed */
2180 megasas_complete_outstanding_ioctls(instance);
2184 * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2185 * restored to max value
2186 * @instance: Adapter soft state
2189 void
2190 megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2192 unsigned long flags;
2194 if (instance->flag & MEGASAS_FW_BUSY
2195 && time_after(jiffies, instance->last_time + 5 * HZ)
2196 && atomic_read(&instance->fw_outstanding) <
2197 instance->throttlequeuedepth + 1) {
2199 spin_lock_irqsave(instance->host->host_lock, flags);
2200 instance->flag &= ~MEGASAS_FW_BUSY;
2202 instance->host->can_queue = instance->cur_can_queue;
2203 spin_unlock_irqrestore(instance->host->host_lock, flags);
2208 * megasas_complete_cmd_dpc - Completes commands from the reply queue
2209 * @instance_addr: Address of adapter soft state
2211 * Tasklet to complete cmds
2213 static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2215 u32 producer;
2216 u32 consumer;
2217 u32 context;
2218 struct megasas_cmd *cmd;
2219 struct megasas_instance *instance =
2220 (struct megasas_instance *)instance_addr;
2221 unsigned long flags;
2223 /* If we have already declared adapter dead, do not complete cmds */
2224 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2225 return;
2227 spin_lock_irqsave(&instance->completion_lock, flags);
2229 producer = le32_to_cpu(*instance->producer);
2230 consumer = le32_to_cpu(*instance->consumer);
2232 while (consumer != producer) {
2233 context = le32_to_cpu(instance->reply_queue[consumer]);
2234 if (context >= instance->max_fw_cmds) {
2235 dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2236 context);
2237 BUG();
2240 cmd = instance->cmd_list[context];
2242 megasas_complete_cmd(instance, cmd, DID_OK);
2244 consumer++;
2245 if (consumer == (instance->max_fw_cmds + 1)) {
2246 consumer = 0;
2250 *instance->consumer = cpu_to_le32(producer);
2252 spin_unlock_irqrestore(&instance->completion_lock, flags);
2255 * Check if we can restore can_queue
2257 megasas_check_and_restore_queue_depth(instance);
2260 static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2263 * megasas_start_timer - Initializes sriov heartbeat timer object
2264 * @instance: Adapter soft state
2267 void megasas_start_timer(struct megasas_instance *instance)
2269 struct timer_list *timer = &instance->sriov_heartbeat_timer;
2271 timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2272 timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2273 add_timer(timer);
2276 static void
2277 megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2279 static void
2280 process_fw_state_change_wq(struct work_struct *work);
2282 static void megasas_do_ocr(struct megasas_instance *instance)
2284 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2285 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2286 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2287 *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2289 instance->instancet->disable_intr(instance);
2290 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2291 instance->issuepend_done = 0;
2293 atomic_set(&instance->fw_outstanding, 0);
2294 megasas_internal_reset_defer_cmds(instance);
2295 process_fw_state_change_wq(&instance->work_init);
2298 static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2299 int initial)
2301 struct megasas_cmd *cmd;
2302 struct megasas_dcmd_frame *dcmd;
2303 struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2304 dma_addr_t new_affiliation_111_h;
2305 int ld, retval = 0;
2306 u8 thisVf;
2308 cmd = megasas_get_cmd(instance);
2310 if (!cmd) {
2311 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111: "
2312 "Failed to get cmd for scsi%d\n",
2313 instance->host->host_no);
2314 return -ENOMEM;
2317 dcmd = &cmd->frame->dcmd;
2319 if (!instance->vf_affiliation_111) {
2320 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2321 "affiliation for scsi%d\n", instance->host->host_no);
2322 megasas_return_cmd(instance, cmd);
2323 return -ENOMEM;
2326 if (initial)
2327 memset(instance->vf_affiliation_111, 0,
2328 sizeof(struct MR_LD_VF_AFFILIATION_111));
2329 else {
2330 new_affiliation_111 =
2331 dma_alloc_coherent(&instance->pdev->dev,
2332 sizeof(struct MR_LD_VF_AFFILIATION_111),
2333 &new_affiliation_111_h, GFP_KERNEL);
2334 if (!new_affiliation_111) {
2335 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2336 "memory for new affiliation for scsi%d\n",
2337 instance->host->host_no);
2338 megasas_return_cmd(instance, cmd);
2339 return -ENOMEM;
2343 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2345 dcmd->cmd = MFI_CMD_DCMD;
2346 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2347 dcmd->sge_count = 1;
2348 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2349 dcmd->timeout = 0;
2350 dcmd->pad_0 = 0;
2351 dcmd->data_xfer_len =
2352 cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2353 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2355 if (initial)
2356 dcmd->sgl.sge32[0].phys_addr =
2357 cpu_to_le32(instance->vf_affiliation_111_h);
2358 else
2359 dcmd->sgl.sge32[0].phys_addr =
2360 cpu_to_le32(new_affiliation_111_h);
2362 dcmd->sgl.sge32[0].length = cpu_to_le32(
2363 sizeof(struct MR_LD_VF_AFFILIATION_111));
2365 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2366 "scsi%d\n", instance->host->host_no);
2368 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2369 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2370 " failed with status 0x%x for scsi%d\n",
2371 dcmd->cmd_status, instance->host->host_no);
2372 retval = 1; /* Do a scan if we couldn't get affiliation */
2373 goto out;
2376 if (!initial) {
2377 thisVf = new_affiliation_111->thisVf;
2378 for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2379 if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2380 new_affiliation_111->map[ld].policy[thisVf]) {
2381 dev_warn(&instance->pdev->dev, "SR-IOV: "
2382 "Got new LD/VF affiliation for scsi%d\n",
2383 instance->host->host_no);
2384 memcpy(instance->vf_affiliation_111,
2385 new_affiliation_111,
2386 sizeof(struct MR_LD_VF_AFFILIATION_111));
2387 retval = 1;
2388 goto out;
2391 out:
2392 if (new_affiliation_111) {
2393 dma_free_coherent(&instance->pdev->dev,
2394 sizeof(struct MR_LD_VF_AFFILIATION_111),
2395 new_affiliation_111,
2396 new_affiliation_111_h);
2399 megasas_return_cmd(instance, cmd);
2401 return retval;
2404 static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2405 int initial)
2407 struct megasas_cmd *cmd;
2408 struct megasas_dcmd_frame *dcmd;
2409 struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2410 struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2411 dma_addr_t new_affiliation_h;
2412 int i, j, retval = 0, found = 0, doscan = 0;
2413 u8 thisVf;
2415 cmd = megasas_get_cmd(instance);
2417 if (!cmd) {
2418 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_12: "
2419 "Failed to get cmd for scsi%d\n",
2420 instance->host->host_no);
2421 return -ENOMEM;
2424 dcmd = &cmd->frame->dcmd;
2426 if (!instance->vf_affiliation) {
2427 dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2428 "affiliation for scsi%d\n", instance->host->host_no);
2429 megasas_return_cmd(instance, cmd);
2430 return -ENOMEM;
2433 if (initial)
2434 memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2435 sizeof(struct MR_LD_VF_AFFILIATION));
2436 else {
2437 new_affiliation =
2438 dma_alloc_coherent(&instance->pdev->dev,
2439 (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2440 &new_affiliation_h, GFP_KERNEL);
2441 if (!new_affiliation) {
2442 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2443 "memory for new affiliation for scsi%d\n",
2444 instance->host->host_no);
2445 megasas_return_cmd(instance, cmd);
2446 return -ENOMEM;
2450 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2452 dcmd->cmd = MFI_CMD_DCMD;
2453 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2454 dcmd->sge_count = 1;
2455 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2456 dcmd->timeout = 0;
2457 dcmd->pad_0 = 0;
2458 dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2459 sizeof(struct MR_LD_VF_AFFILIATION));
2460 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2462 if (initial)
2463 dcmd->sgl.sge32[0].phys_addr =
2464 cpu_to_le32(instance->vf_affiliation_h);
2465 else
2466 dcmd->sgl.sge32[0].phys_addr =
2467 cpu_to_le32(new_affiliation_h);
2469 dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2470 sizeof(struct MR_LD_VF_AFFILIATION));
2472 dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2473 "scsi%d\n", instance->host->host_no);
2476 if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2477 dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2478 " failed with status 0x%x for scsi%d\n",
2479 dcmd->cmd_status, instance->host->host_no);
2480 retval = 1; /* Do a scan if we couldn't get affiliation */
2481 goto out;
2484 if (!initial) {
2485 if (!new_affiliation->ldCount) {
2486 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2487 "affiliation for passive path for scsi%d\n",
2488 instance->host->host_no);
2489 retval = 1;
2490 goto out;
2492 newmap = new_affiliation->map;
2493 savedmap = instance->vf_affiliation->map;
2494 thisVf = new_affiliation->thisVf;
2495 for (i = 0 ; i < new_affiliation->ldCount; i++) {
2496 found = 0;
2497 for (j = 0; j < instance->vf_affiliation->ldCount;
2498 j++) {
2499 if (newmap->ref.targetId ==
2500 savedmap->ref.targetId) {
2501 found = 1;
2502 if (newmap->policy[thisVf] !=
2503 savedmap->policy[thisVf]) {
2504 doscan = 1;
2505 goto out;
2508 savedmap = (struct MR_LD_VF_MAP *)
2509 ((unsigned char *)savedmap +
2510 savedmap->size);
2512 if (!found && newmap->policy[thisVf] !=
2513 MR_LD_ACCESS_HIDDEN) {
2514 doscan = 1;
2515 goto out;
2517 newmap = (struct MR_LD_VF_MAP *)
2518 ((unsigned char *)newmap + newmap->size);
2521 newmap = new_affiliation->map;
2522 savedmap = instance->vf_affiliation->map;
2524 for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2525 found = 0;
2526 for (j = 0 ; j < new_affiliation->ldCount; j++) {
2527 if (savedmap->ref.targetId ==
2528 newmap->ref.targetId) {
2529 found = 1;
2530 if (savedmap->policy[thisVf] !=
2531 newmap->policy[thisVf]) {
2532 doscan = 1;
2533 goto out;
2536 newmap = (struct MR_LD_VF_MAP *)
2537 ((unsigned char *)newmap +
2538 newmap->size);
2540 if (!found && savedmap->policy[thisVf] !=
2541 MR_LD_ACCESS_HIDDEN) {
2542 doscan = 1;
2543 goto out;
2545 savedmap = (struct MR_LD_VF_MAP *)
2546 ((unsigned char *)savedmap +
2547 savedmap->size);
2550 out:
2551 if (doscan) {
2552 dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2553 "affiliation for scsi%d\n", instance->host->host_no);
2554 memcpy(instance->vf_affiliation, new_affiliation,
2555 new_affiliation->size);
2556 retval = 1;
2559 if (new_affiliation)
2560 dma_free_coherent(&instance->pdev->dev,
2561 (MAX_LOGICAL_DRIVES + 1) *
2562 sizeof(struct MR_LD_VF_AFFILIATION),
2563 new_affiliation, new_affiliation_h);
2564 megasas_return_cmd(instance, cmd);
2566 return retval;
2569 /* This function will get the current SR-IOV LD/VF affiliation */
2570 static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2571 int initial)
2573 int retval;
2575 if (instance->PlasmaFW111)
2576 retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2577 else
2578 retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2579 return retval;
2582 /* This function will tell FW to start the SR-IOV heartbeat */
2583 int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2584 int initial)
2586 struct megasas_cmd *cmd;
2587 struct megasas_dcmd_frame *dcmd;
2588 int retval = 0;
2590 cmd = megasas_get_cmd(instance);
2592 if (!cmd) {
2593 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2594 "Failed to get cmd for scsi%d\n",
2595 instance->host->host_no);
2596 return -ENOMEM;
2599 dcmd = &cmd->frame->dcmd;
2601 if (initial) {
2602 instance->hb_host_mem =
2603 dma_alloc_coherent(&instance->pdev->dev,
2604 sizeof(struct MR_CTRL_HB_HOST_MEM),
2605 &instance->hb_host_mem_h,
2606 GFP_KERNEL);
2607 if (!instance->hb_host_mem) {
2608 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2609 " memory for heartbeat host memory for scsi%d\n",
2610 instance->host->host_no);
2611 retval = -ENOMEM;
2612 goto out;
2616 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2618 dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2619 dcmd->cmd = MFI_CMD_DCMD;
2620 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2621 dcmd->sge_count = 1;
2622 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2623 dcmd->timeout = 0;
2624 dcmd->pad_0 = 0;
2625 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2626 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2628 megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2629 sizeof(struct MR_CTRL_HB_HOST_MEM));
2631 dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2632 instance->host->host_no);
2634 if ((instance->adapter_type != MFI_SERIES) &&
2635 !instance->mask_interrupts)
2636 retval = megasas_issue_blocked_cmd(instance, cmd,
2637 MEGASAS_ROUTINE_WAIT_TIME_VF);
2638 else
2639 retval = megasas_issue_polled(instance, cmd);
2641 if (retval) {
2642 dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2643 "_MEM_ALLOC DCMD %s for scsi%d\n",
2644 (dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2645 "timed out" : "failed", instance->host->host_no);
2646 retval = 1;
2649 out:
2650 megasas_return_cmd(instance, cmd);
2652 return retval;
2655 /* Handler for SR-IOV heartbeat */
2656 static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2658 struct megasas_instance *instance =
2659 from_timer(instance, t, sriov_heartbeat_timer);
2661 if (instance->hb_host_mem->HB.fwCounter !=
2662 instance->hb_host_mem->HB.driverCounter) {
2663 instance->hb_host_mem->HB.driverCounter =
2664 instance->hb_host_mem->HB.fwCounter;
2665 mod_timer(&instance->sriov_heartbeat_timer,
2666 jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2667 } else {
2668 dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2669 "completed for scsi%d\n", instance->host->host_no);
2670 schedule_work(&instance->work_init);
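/*
 * Heartbeat protocol in brief (illustrative summary): on every timer tick the
 * VF driver copies HB.fwCounter into HB.driverCounter and re-arms the timer;
 * if firmware stops advancing fwCounter, the two counters stay equal and the
 * handler schedules work_init to recover.
 */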
2675 * megasas_wait_for_outstanding - Wait for all outstanding cmds
2676 * @instance: Adapter soft state
2678 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2679 * complete all its outstanding commands. Returns error if one or more IOs
2680 * are pending after this time period. It also marks the controller dead.
2682 static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2684 int i, sl, outstanding;
2685 u32 reset_index;
2686 u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2687 unsigned long flags;
2688 struct list_head clist_local;
2689 struct megasas_cmd *reset_cmd;
2690 u32 fw_state;
2692 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2693 dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2694 __func__, __LINE__);
2695 return FAILED;
2698 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2700 INIT_LIST_HEAD(&clist_local);
2701 spin_lock_irqsave(&instance->hba_lock, flags);
2702 list_splice_init(&instance->internal_reset_pending_q,
2703 &clist_local);
2704 spin_unlock_irqrestore(&instance->hba_lock, flags);
2706 dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2707 for (i = 0; i < wait_time; i++) {
2708 msleep(1000);
2709 if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2710 break;
2713 if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2714 dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2715 atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2716 return FAILED;
2719 reset_index = 0;
2720 while (!list_empty(&clist_local)) {
2721 reset_cmd = list_entry((&clist_local)->next,
2722 struct megasas_cmd, list);
2723 list_del_init(&reset_cmd->list);
2724 if (reset_cmd->scmd) {
2725 reset_cmd->scmd->result = DID_REQUEUE << 16;
2726 dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2727 reset_index, reset_cmd,
2728 reset_cmd->scmd->cmnd[0]);
2730 reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2731 megasas_return_cmd(instance, reset_cmd);
2732 } else if (reset_cmd->sync_cmd) {
2733 dev_notice(&instance->pdev->dev, "%p synch cmds "
2734 "reset queue\n",
2735 reset_cmd);
2737 reset_cmd->cmd_status_drv = DCMD_INIT;
2738 instance->instancet->fire_cmd(instance,
2739 reset_cmd->frame_phys_addr,
2740 0, instance->reg_set);
2741 } else {
2742 dev_notice(&instance->pdev->dev, "%p unexpected "
2743 "cmds lst\n",
2744 reset_cmd);
2746 reset_index++;
2749 return SUCCESS;
2752 for (i = 0; i < resetwaittime; i++) {
2753 outstanding = atomic_read(&instance->fw_outstanding);
2755 if (!outstanding)
2756 break;
2758 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2759 dev_notice(&instance->pdev->dev, "[%2d] waiting for %d "
2760 "commands to complete\n", i, outstanding);
2762 * Call cmd completion routine. Cmd to be
2763 * completed directly without depending on isr.
2765 megasas_complete_cmd_dpc((unsigned long)instance);
2768 msleep(1000);
2771 i = 0;
2772 outstanding = atomic_read(&instance->fw_outstanding);
2773 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2775 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2776 goto no_outstanding;
2778 if (instance->disableOnlineCtrlReset)
2779 goto kill_hba_and_failed;
2780 do {
2781 if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2782 dev_info(&instance->pdev->dev,
2783 "%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2784 __func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2785 if (i == 3)
2786 goto kill_hba_and_failed;
2787 megasas_do_ocr(instance);
2789 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2790 dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2791 __func__, __LINE__);
2792 return FAILED;
2794 dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2795 __func__, __LINE__);
2797 for (sl = 0; sl < 10; sl++)
2798 msleep(500);
2800 outstanding = atomic_read(&instance->fw_outstanding);
2802 fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2803 if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2804 goto no_outstanding;
2806 i++;
2807 } while (i <= 3);
2809 no_outstanding:
2811 dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2812 __func__, __LINE__);
2813 return SUCCESS;
2815 kill_hba_and_failed:
2817 /* Reset not supported, kill adapter */
2818 dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2819 " disableOnlineCtrlReset %d fw_outstanding %d \n",
2820 __func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2821 atomic_read(&instance->fw_outstanding));
2822 megasas_dump_pending_frames(instance);
2823 megaraid_sas_kill_hba(instance);
2825 return FAILED;
2829 * megasas_generic_reset - Generic reset routine
2830 * @scmd: Mid-layer SCSI command
2832 * This routine implements a generic reset handler for device, bus and host
2833 * reset requests. Device, bus and host specific reset handlers can use this
2834 * function after they do their specific tasks.
2836 static int megasas_generic_reset(struct scsi_cmnd *scmd)
2838 int ret_val;
2839 struct megasas_instance *instance;
2841 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2843 scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2844 scmd->cmnd[0], scmd->retries);
2846 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2847 dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2848 return FAILED;
2851 ret_val = megasas_wait_for_outstanding(instance);
2852 if (ret_val == SUCCESS)
2853 dev_notice(&instance->pdev->dev, "reset successful\n");
2854 else
2855 dev_err(&instance->pdev->dev, "failed to do reset\n");
2857 return ret_val;
2861 * megasas_reset_timer - quiesce the adapter if required
2862 * @scmd: scsi cmnd
2864 * Sets the FW busy flag and reduces the host->can_queue if the
2865 * cmd has not been completed within the timeout period.
2867 static enum
2868 blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2870 struct megasas_instance *instance;
2871 unsigned long flags;
2873 if (time_after(jiffies, scmd->jiffies_at_alloc +
2874 (scmd_timeout * 2) * HZ)) {
2875 return BLK_EH_DONE;
2878 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2879 if (!(instance->flag & MEGASAS_FW_BUSY)) {
2880 /* FW is busy, throttle IO */
2881 spin_lock_irqsave(instance->host->host_lock, flags);
2883 instance->host->can_queue = instance->throttlequeuedepth;
2884 instance->last_time = jiffies;
2885 instance->flag |= MEGASAS_FW_BUSY;
2887 spin_unlock_irqrestore(instance->host->host_lock, flags);
2889 return BLK_EH_RESET_TIMER;
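/*
 * Illustrative example (not part of the driver logic): on the first timeout,
 * can_queue drops from cur_can_queue to throttlequeuedepth (16 by default);
 * megasas_check_and_restore_queue_depth() restores cur_can_queue once roughly
 * 5 seconds have passed and fw_outstanding has dropped below the throttle
 * level.
 */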
2893 * megasas_dump - This function will print hexdump of provided buffer.
2894 * @buf: Buffer to be dumped
2895 * @sz: Size in bytes
2896 * @format: Different formats of dumping e.g. format=n will
2897 * cause only 'n' 32 bit words to be dumped in a single
2898 * line.
2900 inline void
2901 megasas_dump(void *buf, int sz, int format)
2903 int i;
2904 __le32 *buf_loc = (__le32 *)buf;
2906 for (i = 0; i < (sz / sizeof(__le32)); i++) {
2907 if ((i % format) == 0) {
2908 if (i != 0)
2909 printk(KERN_CONT "\n");
2910 printk(KERN_CONT "%08x: ", (i * 4));
2912 printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2914 printk(KERN_CONT "\n");
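/*
 * Usage sketch (illustrative): megasas_dump(cmd->io_request, 256, 8) would
 * print 256 bytes as 64 little-endian 32-bit words, eight per line, each
 * line prefixed with its byte offset ("00000000: ...", "00000020: ...").
 */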
2918 * megasas_dump_reg_set - This function will print hexdump of register set
2919 * @reg_set: Register set to be dumped; the first 256 bytes are printed
2920 * as 32-bit words, one per line.
2925 inline void
2926 megasas_dump_reg_set(void __iomem *reg_set)
2928 unsigned int i, sz = 256;
2929 u32 __iomem *reg = (u32 __iomem *)reg_set;
2931 for (i = 0; i < (sz / sizeof(u32)); i++)
2932 printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2936 * megasas_dump_fusion_io - This function will print key details
2937 * of SCSI IO
2938 * @scmd: SCSI command pointer of SCSI IO
2940 void
2941 megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2943 struct megasas_cmd_fusion *cmd;
2944 union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2945 struct megasas_instance *instance;
2947 cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2948 instance = (struct megasas_instance *)scmd->device->host->hostdata;
2950 scmd_printk(KERN_INFO, scmd,
2951 "scmd: (0x%p) retries: 0x%x allowed: 0x%x\n",
2952 scmd, scmd->retries, scmd->allowed);
2953 scsi_print_command(scmd);
2955 if (cmd) {
2956 req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
2957 scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
2958 scmd_printk(KERN_INFO, scmd,
2959 "RequestFlags:0x%x MSIxIndex:0x%x SMID:0x%x LMID:0x%x DevHandle:0x%x\n",
2960 req_desc->SCSIIO.RequestFlags,
2961 req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
2962 req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
2964 printk(KERN_INFO "IO request frame:\n");
2965 megasas_dump(cmd->io_request,
2966 MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
2967 printk(KERN_INFO "Chain frame:\n");
2968 megasas_dump(cmd->sg_frame,
2969 instance->max_chain_frame_sz, 8);
2975 * megasas_dump_sys_regs - This function will dump system registers through
2976 * sysfs.
2977 * @reg_set: Pointer to System register set.
2978 * @buf: Buffer to which output is to be written.
2979 * @return: Number of bytes written to buffer.
2981 static inline ssize_t
2982 megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
2984 unsigned int i, sz = 256;
2985 int bytes_wrote = 0;
2986 char *loc = (char *)buf;
2987 u32 __iomem *reg = (u32 __iomem *)reg_set;
2989 for (i = 0; i < sz / sizeof(u32); i++) {
2990 bytes_wrote += snprintf(loc + bytes_wrote, PAGE_SIZE,
2991 "%08x: %08x\n", (i * 4),
2992 readl(&reg[i]));
2994 return bytes_wrote;
2998 * megasas_reset_bus_host - Bus & host reset handler entry point
3000 static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3002 int ret;
3003 struct megasas_instance *instance;
3005 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3007 scmd_printk(KERN_INFO, scmd,
3008 "OCR is requested due to IO timeout!!\n");
3010 scmd_printk(KERN_INFO, scmd,
3011 "SCSI host state: %d SCSI host busy: %d FW outstanding: %d\n",
3012 scmd->device->host->shost_state,
3013 scsi_host_busy(scmd->device->host),
3014 atomic_read(&instance->fw_outstanding));
3016 * First wait for all commands to complete
3018 if (instance->adapter_type == MFI_SERIES) {
3019 ret = megasas_generic_reset(scmd);
3020 } else {
3021 megasas_dump_fusion_io(scmd);
3022 ret = megasas_reset_fusion(scmd->device->host,
3023 SCSIIO_TIMEOUT_OCR);
3026 return ret;
3030 * megasas_task_abort - Issues task abort request to firmware
3031 * (supported only for fusion adapters)
3032 * @scmd: SCSI command pointer
3034 static int megasas_task_abort(struct scsi_cmnd *scmd)
3036 int ret;
3037 struct megasas_instance *instance;
3039 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3041 if (instance->adapter_type != MFI_SERIES)
3042 ret = megasas_task_abort_fusion(scmd);
3043 else {
3044 sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3045 ret = FAILED;
3048 return ret;
3052 * megasas_reset_target: Issues target reset request to firmware
3053 * (supported only for fusion adapters)
3054 * @scmd: SCSI command pointer
3056 static int megasas_reset_target(struct scsi_cmnd *scmd)
3058 int ret;
3059 struct megasas_instance *instance;
3061 instance = (struct megasas_instance *)scmd->device->host->hostdata;
3063 if (instance->adapter_type != MFI_SERIES)
3064 ret = megasas_reset_target_fusion(scmd);
3065 else {
3066 sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3067 ret = FAILED;
3070 return ret;
3074 * megasas_bios_param - Returns disk geometry for a disk
3075 * @sdev: device handle
3076 * @bdev: block device
3077 * @capacity: drive capacity
3078 * @geom: geometry parameters
3080 static int
3081 megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3082 sector_t capacity, int geom[])
3084 int heads;
3085 int sectors;
3086 sector_t cylinders;
3087 unsigned long tmp;
3089 /* Default heads (64) & sectors (32) */
3090 heads = 64;
3091 sectors = 32;
3093 tmp = heads * sectors;
3094 cylinders = capacity;
3096 sector_div(cylinders, tmp);
3099 * Handle extended translation size for logical drives > 1Gb
3102 if (capacity >= 0x200000) {
3103 heads = 255;
3104 sectors = 63;
3105 tmp = heads*sectors;
3106 cylinders = capacity;
3107 sector_div(cylinders, tmp);
3110 geom[0] = heads;
3111 geom[1] = sectors;
3112 geom[2] = cylinders;
3114 return 0;
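/*
 * Worked example (illustrative): a 100 GiB logical drive has
 * capacity = 209715200 sectors >= 0x200000, so heads = 255, sectors = 63 and
 * cylinders = 209715200 / (255 * 63) = 13054 (integer division via
 * sector_div).
 */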
3117 static void megasas_aen_polling(struct work_struct *work);
3120 * megasas_service_aen - Processes an event notification
3121 * @instance: Adapter soft state
3122 * @cmd: AEN command completed by the ISR
3124 * For AEN, driver sends a command down to FW that is held by the FW till an
3125 * event occurs. When an event of interest occurs, FW completes the command
3126 * that it was previously holding.
3128 * This routine sends a SIGIO signal to processes that have registered with the
3129 * driver for AEN.
3131 static void
3132 megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3134 unsigned long flags;
3137 * Don't signal app if it is just an aborted previously registered aen
3139 if ((!cmd->abort_aen) && (instance->unload == 0)) {
3140 spin_lock_irqsave(&poll_aen_lock, flags);
3141 megasas_poll_wait_aen = 1;
3142 spin_unlock_irqrestore(&poll_aen_lock, flags);
3143 wake_up(&megasas_poll_wait);
3144 kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3146 else
3147 cmd->abort_aen = 0;
3149 instance->aen_cmd = NULL;
3151 megasas_return_cmd(instance, cmd);
3153 if ((instance->unload == 0) &&
3154 ((instance->issuepend_done == 1))) {
3155 struct megasas_aen_event *ev;
3157 ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3158 if (!ev) {
3159 dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3160 } else {
3161 ev->instance = instance;
3162 instance->ev = ev;
3163 INIT_DELAYED_WORK(&ev->hotplug_work,
3164 megasas_aen_polling);
3165 schedule_delayed_work(&ev->hotplug_work, 0);
3170 static ssize_t
3171 fw_crash_buffer_store(struct device *cdev,
3172 struct device_attribute *attr, const char *buf, size_t count)
3174 struct Scsi_Host *shost = class_to_shost(cdev);
3175 struct megasas_instance *instance =
3176 (struct megasas_instance *) shost->hostdata;
3177 int val = 0;
3178 unsigned long flags;
3180 if (kstrtoint(buf, 0, &val) != 0)
3181 return -EINVAL;
3183 spin_lock_irqsave(&instance->crashdump_lock, flags);
3184 instance->fw_crash_buffer_offset = val;
3185 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3186 return strlen(buf);
3189 static ssize_t
3190 fw_crash_buffer_show(struct device *cdev,
3191 struct device_attribute *attr, char *buf)
3193 struct Scsi_Host *shost = class_to_shost(cdev);
3194 struct megasas_instance *instance =
3195 (struct megasas_instance *) shost->hostdata;
3196 u32 size;
3197 unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3198 unsigned long chunk_left_bytes;
3199 unsigned long src_addr;
3200 unsigned long flags;
3201 u32 buff_offset;
3203 spin_lock_irqsave(&instance->crashdump_lock, flags);
3204 buff_offset = instance->fw_crash_buffer_offset;
3205 if (!instance->crash_dump_buf &&
3206 !((instance->fw_crash_state == AVAILABLE) ||
3207 (instance->fw_crash_state == COPYING))) {
3208 dev_err(&instance->pdev->dev,
3209 "Firmware crash dump is not available\n");
3210 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3211 return -EINVAL;
3214 if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3215 dev_err(&instance->pdev->dev,
3216 "Firmware crash dump offset is out of range\n");
3217 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3218 return 0;
3221 size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3222 chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3223 size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3224 size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3226 src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3227 (buff_offset % dmachunk);
3228 memcpy(buf, (void *)src_addr, size);
3229 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3231 return size;
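/*
 * Illustrative example (assuming CRASH_DMA_BUF_SIZE is 1 MB): with
 * fw_crash_buffer_offset = 0x140000, the data is read from crash_buf[1] at
 * offset 0x40000 within that chunk, and size is further capped to the bytes
 * left in the chunk and to PAGE_SIZE - 1.
 */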
3234 static ssize_t
3235 fw_crash_buffer_size_show(struct device *cdev,
3236 struct device_attribute *attr, char *buf)
3238 struct Scsi_Host *shost = class_to_shost(cdev);
3239 struct megasas_instance *instance =
3240 (struct megasas_instance *) shost->hostdata;
3242 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3243 ((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3246 static ssize_t
3247 fw_crash_state_store(struct device *cdev,
3248 struct device_attribute *attr, const char *buf, size_t count)
3250 struct Scsi_Host *shost = class_to_shost(cdev);
3251 struct megasas_instance *instance =
3252 (struct megasas_instance *) shost->hostdata;
3253 int val = 0;
3254 unsigned long flags;
3256 if (kstrtoint(buf, 0, &val) != 0)
3257 return -EINVAL;
3259 if ((val <= AVAILABLE || val > COPY_ERROR)) {
3260 dev_err(&instance->pdev->dev, "application updates invalid "
3261 "firmware crash state\n");
3262 return -EINVAL;
3265 instance->fw_crash_state = val;
3267 if ((val == COPIED) || (val == COPY_ERROR)) {
3268 spin_lock_irqsave(&instance->crashdump_lock, flags);
3269 megasas_free_host_crash_buffer(instance);
3270 spin_unlock_irqrestore(&instance->crashdump_lock, flags);
3271 if (val == COPY_ERROR)
3272 dev_info(&instance->pdev->dev, "application failed to "
3273 "copy Firmware crash dump\n");
3274 else
3275 dev_info(&instance->pdev->dev, "Firmware crash dump "
3276 "copied successfully\n");
3278 return strlen(buf);
3281 static ssize_t
3282 fw_crash_state_show(struct device *cdev,
3283 struct device_attribute *attr, char *buf)
3285 struct Scsi_Host *shost = class_to_shost(cdev);
3286 struct megasas_instance *instance =
3287 (struct megasas_instance *) shost->hostdata;
3289 return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3292 static ssize_t
3293 page_size_show(struct device *cdev,
3294 struct device_attribute *attr, char *buf)
3296 return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3299 static ssize_t
3300 ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3301 char *buf)
3303 struct Scsi_Host *shost = class_to_shost(cdev);
3304 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3306 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3309 static ssize_t
3310 fw_cmds_outstanding_show(struct device *cdev,
3311 struct device_attribute *attr, char *buf)
3313 struct Scsi_Host *shost = class_to_shost(cdev);
3314 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3316 return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3319 static ssize_t
3320 enable_sdev_max_qd_show(struct device *cdev,
3321 struct device_attribute *attr, char *buf)
3323 struct Scsi_Host *shost = class_to_shost(cdev);
3324 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3326 return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3329 static ssize_t
3330 enable_sdev_max_qd_store(struct device *cdev,
3331 struct device_attribute *attr, const char *buf, size_t count)
3333 struct Scsi_Host *shost = class_to_shost(cdev);
3334 struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3335 u32 val = 0;
3336 bool is_target_prop;
3337 int ret_target_prop = DCMD_FAILED;
3338 struct scsi_device *sdev;
3340 if (kstrtou32(buf, 0, &val) != 0) {
3341 pr_err("megasas: could not set enable_sdev_max_qd\n");
3342 return -EINVAL;
3345 mutex_lock(&instance->reset_mutex);
3346 if (val)
3347 instance->enable_sdev_max_qd = true;
3348 else
3349 instance->enable_sdev_max_qd = false;
3351 shost_for_each_device(sdev, shost) {
3352 ret_target_prop = megasas_get_target_prop(instance, sdev);
3353 is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3354 megasas_set_fw_assisted_qd(sdev, is_target_prop);
3356 mutex_unlock(&instance->reset_mutex);
3358 return strlen(buf);
3361 static ssize_t
3362 dump_system_regs_show(struct device *cdev,
3363 struct device_attribute *attr, char *buf)
3365 struct Scsi_Host *shost = class_to_shost(cdev);
3366 struct megasas_instance *instance =
3367 (struct megasas_instance *)shost->hostdata;
3369 return megasas_dump_sys_regs(instance->reg_set, buf);
3372 static ssize_t
3373 raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3374 char *buf)
3376 struct Scsi_Host *shost = class_to_shost(cdev);
3377 struct megasas_instance *instance =
3378 (struct megasas_instance *)shost->hostdata;
3380 return snprintf(buf, PAGE_SIZE, "%ld\n",
3381 (unsigned long)instance->map_id);
3384 static DEVICE_ATTR_RW(fw_crash_buffer);
3385 static DEVICE_ATTR_RO(fw_crash_buffer_size);
3386 static DEVICE_ATTR_RW(fw_crash_state);
3387 static DEVICE_ATTR_RO(page_size);
3388 static DEVICE_ATTR_RO(ldio_outstanding);
3389 static DEVICE_ATTR_RO(fw_cmds_outstanding);
3390 static DEVICE_ATTR_RW(enable_sdev_max_qd);
3391 static DEVICE_ATTR_RO(dump_system_regs);
3392 static DEVICE_ATTR_RO(raid_map_id);
3394 static struct device_attribute *megaraid_host_attrs[] = {
3395 &dev_attr_fw_crash_buffer_size,
3396 &dev_attr_fw_crash_buffer,
3397 &dev_attr_fw_crash_state,
3398 &dev_attr_page_size,
3399 &dev_attr_ldio_outstanding,
3400 &dev_attr_fw_cmds_outstanding,
3401 &dev_attr_enable_sdev_max_qd,
3402 &dev_attr_dump_system_regs,
3403 &dev_attr_raid_map_id,
3404 NULL,
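/*
 * Usage note (illustrative, assuming the standard scsi_host sysfs layout):
 * these attributes appear under /sys/class/scsi_host/host<N>/, e.g. reading
 * fw_cmds_outstanding reports the current firmware-outstanding command count,
 * and writing the COPIED state value to fw_crash_state tells the driver the
 * crash dump has been copied so the host buffer can be freed.
 */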
3408 * Scsi host template for megaraid_sas driver
3410 static struct scsi_host_template megasas_template = {
3412 .module = THIS_MODULE,
3413 .name = "Avago SAS based MegaRAID driver",
3414 .proc_name = "megaraid_sas",
3415 .slave_configure = megasas_slave_configure,
3416 .slave_alloc = megasas_slave_alloc,
3417 .slave_destroy = megasas_slave_destroy,
3418 .queuecommand = megasas_queue_command,
3419 .eh_target_reset_handler = megasas_reset_target,
3420 .eh_abort_handler = megasas_task_abort,
3421 .eh_host_reset_handler = megasas_reset_bus_host,
3422 .eh_timed_out = megasas_reset_timer,
3423 .shost_attrs = megaraid_host_attrs,
3424 .bios_param = megasas_bios_param,
3425 .change_queue_depth = scsi_change_queue_depth,
3426 .max_segment_size = 0xffffffff,
3430 * megasas_complete_int_cmd - Completes an internal command
3431 * @instance: Adapter soft state
3432 * @cmd: Command to be completed
3434 * The megasas_issue_blocked_cmd() function waits for a command to complete
3435 * after it issues a command. This function wakes up that waiting routine by
3436 * calling wake_up() on the wait queue.
3438 static void
3439 megasas_complete_int_cmd(struct megasas_instance *instance,
3440 struct megasas_cmd *cmd)
3442 if (cmd->cmd_status_drv == DCMD_INIT)
3443 cmd->cmd_status_drv =
3444 (cmd->frame->io.cmd_status == MFI_STAT_OK) ?
3445 DCMD_SUCCESS : DCMD_FAILED;
3447 wake_up(&instance->int_cmd_wait_q);
3451 * megasas_complete_abort - Completes aborting a command
3452 * @instance: Adapter soft state
3453 * @cmd: Cmd that was issued to abort another cmd
3455 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3456 * after it issues an abort on a previously issued command. This function
3457 * wakes up all functions waiting on the same wait queue.
3459 static void
3460 megasas_complete_abort(struct megasas_instance *instance,
3461 struct megasas_cmd *cmd)
3463 if (cmd->sync_cmd) {
3464 cmd->sync_cmd = 0;
3465 cmd->cmd_status_drv = DCMD_SUCCESS;
3466 wake_up(&instance->abort_cmd_wait_q);
3471 * megasas_complete_cmd - Completes a command
3472 * @instance: Adapter soft state
3473 * @cmd: Command to be completed
3474 * @alt_status: If non-zero, use this value as status to
3475 * SCSI mid-layer instead of the value returned
3476 * by the FW. This should be used if caller wants
3477 * an alternate status (as in the case of aborted
3478 * commands)
3480 void
3481 megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3482 u8 alt_status)
3484 int exception = 0;
3485 struct megasas_header *hdr = &cmd->frame->hdr;
3486 unsigned long flags;
3487 struct fusion_context *fusion = instance->ctrl_context;
3488 u32 opcode, status;
3490 /* flag for the retry reset */
3491 cmd->retry_for_fw_reset = 0;
3493 if (cmd->scmd)
3494 cmd->scmd->SCp.ptr = NULL;
3496 switch (hdr->cmd) {
3497 case MFI_CMD_INVALID:
3498 /* Some older 1068 controller FW may keep a pending
3499 MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3500 when booting the kdump kernel. Ignore this command to
3501 prevent a kernel panic on shutdown of the kdump kernel. */
3502 dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3503 "completed\n");
3504 dev_warn(&instance->pdev->dev, "If you have a controller "
3505 "other than PERC5, please upgrade your firmware\n");
3506 break;
3507 case MFI_CMD_PD_SCSI_IO:
3508 case MFI_CMD_LD_SCSI_IO:
3511 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3512 * issued either through an IO path or an IOCTL path. If it
3513 * was via IOCTL, we will send it to internal completion.
3515 if (cmd->sync_cmd) {
3516 cmd->sync_cmd = 0;
3517 megasas_complete_int_cmd(instance, cmd);
3518 break;
3520 /* fall through */
3522 case MFI_CMD_LD_READ:
3523 case MFI_CMD_LD_WRITE:
3525 if (alt_status) {
3526 cmd->scmd->result = alt_status << 16;
3527 exception = 1;
3530 if (exception) {
3532 atomic_dec(&instance->fw_outstanding);
3534 scsi_dma_unmap(cmd->scmd);
3535 cmd->scmd->scsi_done(cmd->scmd);
3536 megasas_return_cmd(instance, cmd);
3538 break;
3541 switch (hdr->cmd_status) {
3543 case MFI_STAT_OK:
3544 cmd->scmd->result = DID_OK << 16;
3545 break;
3547 case MFI_STAT_SCSI_IO_FAILED:
3548 case MFI_STAT_LD_INIT_IN_PROGRESS:
3549 cmd->scmd->result =
3550 (DID_ERROR << 16) | hdr->scsi_status;
3551 break;
3553 case MFI_STAT_SCSI_DONE_WITH_ERROR:
3555 cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3557 if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3558 memset(cmd->scmd->sense_buffer, 0,
3559 SCSI_SENSE_BUFFERSIZE);
3560 memcpy(cmd->scmd->sense_buffer, cmd->sense,
3561 hdr->sense_len);
3563 cmd->scmd->result |= DRIVER_SENSE << 24;
3566 break;
3568 case MFI_STAT_LD_OFFLINE:
3569 case MFI_STAT_DEVICE_NOT_FOUND:
3570 cmd->scmd->result = DID_BAD_TARGET << 16;
3571 break;
3573 default:
3574 dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3575 hdr->cmd_status);
3576 cmd->scmd->result = DID_ERROR << 16;
3577 break;
3580 atomic_dec(&instance->fw_outstanding);
3582 scsi_dma_unmap(cmd->scmd);
3583 cmd->scmd->scsi_done(cmd->scmd);
3584 megasas_return_cmd(instance, cmd);
3586 break;
3588 case MFI_CMD_SMP:
3589 case MFI_CMD_STP:
3590 case MFI_CMD_NVME:
3591 case MFI_CMD_TOOLBOX:
3592 megasas_complete_int_cmd(instance, cmd);
3593 break;
3595 case MFI_CMD_DCMD:
3596 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3597 /* Check for LD map update */
3598 if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3599 && (cmd->frame->dcmd.mbox.b[1] == 1)) {
3600 fusion->fast_path_io = 0;
3601 spin_lock_irqsave(instance->host->host_lock, flags);
3602 status = cmd->frame->hdr.cmd_status;
3603 instance->map_update_cmd = NULL;
3604 if (status != MFI_STAT_OK) {
3605 if (status != MFI_STAT_NOT_FOUND)
3606 dev_warn(&instance->pdev->dev, "map sync failed, status = 0x%x\n",
3607 cmd->frame->hdr.cmd_status);
3608 else {
3609 megasas_return_cmd(instance, cmd);
3610 spin_unlock_irqrestore(
3611 instance->host->host_lock,
3612 flags);
3613 break;
3617 megasas_return_cmd(instance, cmd);
3620 * Set fast path IO to zero for now;
3621 * MR_ValidateMapInfo() will set the proper value.
3622 * Meanwhile all IOs will go as LD IO.
3624 if (status == MFI_STAT_OK &&
3625 (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3626 instance->map_id++;
3627 fusion->fast_path_io = 1;
3628 } else {
3629 fusion->fast_path_io = 0;
3632 megasas_sync_map_info(instance);
3633 spin_unlock_irqrestore(instance->host->host_lock,
3634 flags);
3635 break;
3637 if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3638 opcode == MR_DCMD_CTRL_EVENT_GET) {
3639 spin_lock_irqsave(&poll_aen_lock, flags);
3640 megasas_poll_wait_aen = 0;
3641 spin_unlock_irqrestore(&poll_aen_lock, flags);
3644 /* FW has an updated PD sequence */
3645 if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3646 (cmd->frame->dcmd.mbox.b[0] == 1)) {
3648 spin_lock_irqsave(instance->host->host_lock, flags);
3649 status = cmd->frame->hdr.cmd_status;
3650 instance->jbod_seq_cmd = NULL;
3651 megasas_return_cmd(instance, cmd);
3653 if (status == MFI_STAT_OK) {
3654 instance->pd_seq_map_id++;
3655 /* Re-register a pd sync seq num cmd */
3656 if (megasas_sync_pd_seq_num(instance, true))
3657 instance->use_seqnum_jbod_fp = false;
3658 } else
3659 instance->use_seqnum_jbod_fp = false;
3661 spin_unlock_irqrestore(instance->host->host_lock, flags);
3662 break;
3666 * See if we got an event notification
3668 if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3669 megasas_service_aen(instance, cmd);
3670 else
3671 megasas_complete_int_cmd(instance, cmd);
3673 break;
3675 case MFI_CMD_ABORT:
3677 * Cmd issued to abort another cmd returned
3679 megasas_complete_abort(instance, cmd);
3680 break;
3682 default:
3683 dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3684 hdr->cmd);
3685 megasas_complete_int_cmd(instance, cmd);
3686 break;
3691 * megasas_issue_pending_cmds_again - issue all pending cmds
3692 * in FW again because of the fw reset
3693 * @instance: Adapter soft state
3695 static inline void
3696 megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3698 struct megasas_cmd *cmd;
3699 struct list_head clist_local;
3700 union megasas_evt_class_locale class_locale;
3701 unsigned long flags;
3702 u32 seq_num;
3704 INIT_LIST_HEAD(&clist_local);
3705 spin_lock_irqsave(&instance->hba_lock, flags);
3706 list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3707 spin_unlock_irqrestore(&instance->hba_lock, flags);
3709 while (!list_empty(&clist_local)) {
3710 cmd = list_entry((&clist_local)->next,
3711 struct megasas_cmd, list);
3712 list_del_init(&cmd->list);
3714 if (cmd->sync_cmd || cmd->scmd) {
3715 dev_notice(&instance->pdev->dev, "command %p, %p:%d "
3716 "detected to be pending while HBA reset\n",
3717 cmd, cmd->scmd, cmd->sync_cmd);
3719 cmd->retry_for_fw_reset++;
3721 if (cmd->retry_for_fw_reset == 3) {
3722 dev_notice(&instance->pdev->dev, "cmd %p, %p:%d "
3723 "was tried multiple times during reset. "
3724 "Shutting down the HBA\n",
3725 cmd, cmd->scmd, cmd->sync_cmd);
3726 instance->instancet->disable_intr(instance);
3727 atomic_set(&instance->fw_reset_no_pci_access, 1);
3728 megaraid_sas_kill_hba(instance);
3729 return;
3733 if (cmd->sync_cmd == 1) {
3734 if (cmd->scmd) {
3735 dev_notice(&instance->pdev->dev, "unexpected "
3736 "cmd attached to internal command!\n");
3738 dev_notice(&instance->pdev->dev, "%p synchronous cmd "
3739 "on the internal reset queue, "
3740 "issue it again.\n", cmd);
3741 cmd->cmd_status_drv = DCMD_INIT;
3742 instance->instancet->fire_cmd(instance,
3743 cmd->frame_phys_addr,
3744 0, instance->reg_set);
3745 } else if (cmd->scmd) {
3746 dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x] "
3747 "detected on the internal queue, issue again.\n",
3748 cmd, cmd->scmd->cmnd[0]);
3750 atomic_inc(&instance->fw_outstanding);
3751 instance->instancet->fire_cmd(instance,
3752 cmd->frame_phys_addr,
3753 cmd->frame_count-1, instance->reg_set);
3754 } else {
3755 dev_notice(&instance->pdev->dev, "%p unexpected cmd on the "
3756 "internal reset defer list while re-issue!!\n",
3757 cmd);
3761 if (instance->aen_cmd) {
3762 dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3763 megasas_return_cmd(instance, instance->aen_cmd);
3765 instance->aen_cmd = NULL;
3769 * Initiate AEN (Asynchronous Event Notification)
3771 seq_num = instance->last_seq_num;
3772 class_locale.members.reserved = 0;
3773 class_locale.members.locale = MR_EVT_LOCALE_ALL;
3774 class_locale.members.class = MR_EVT_CLASS_DEBUG;
3776 megasas_register_aen(instance, seq_num, class_locale.word);
3780 * Move the internal reset pending commands to a deferred queue.
3782 * We move the commands pending at internal reset time to a
3783 * pending queue. This queue will be flushed after successful
3784 * completion of the internal reset sequence. If the internal reset
3785 * does not complete in time, the kernel reset handler will flush
3786 * these commands.
3788 static void
3789 megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3791 struct megasas_cmd *cmd;
3792 int i;
3793 u16 max_cmd = instance->max_fw_cmds;
3794 u32 defer_index;
3795 unsigned long flags;
3797 defer_index = 0;
3798 spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3799 for (i = 0; i < max_cmd; i++) {
3800 cmd = instance->cmd_list[i];
3801 if (cmd->sync_cmd == 1 || cmd->scmd) {
3802 dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p "
3803 "on the defer queue as internal\n",
3804 defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3806 if (!list_empty(&cmd->list)) {
3807 dev_notice(&instance->pdev->dev, "ERROR while"
3808 " moving this cmd:%p, %d %p, it was"
3809 "discovered on some list?\n",
3810 cmd, cmd->sync_cmd, cmd->scmd);
3812 list_del_init(&cmd->list);
3814 defer_index++;
3815 list_add_tail(&cmd->list,
3816 &instance->internal_reset_pending_q);
3819 spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3823 static void
3824 process_fw_state_change_wq(struct work_struct *work)
3826 struct megasas_instance *instance =
3827 container_of(work, struct megasas_instance, work_init);
3828 u32 wait;
3829 unsigned long flags;
3831 if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3832 dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3833 atomic_read(&instance->adprecovery));
3834 return ;
3837 if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3838 dev_notice(&instance->pdev->dev, "FW detected to be in fault "
3839 "state, restarting it...\n");
3841 instance->instancet->disable_intr(instance);
3842 atomic_set(&instance->fw_outstanding, 0);
3844 atomic_set(&instance->fw_reset_no_pci_access, 1);
3845 instance->instancet->adp_reset(instance, instance->reg_set);
3846 atomic_set(&instance->fw_reset_no_pci_access, 0);
3848 dev_notice(&instance->pdev->dev, "FW restarted successfully, "
3849 "initiating next stage...\n");
3851 dev_notice(&instance->pdev->dev, "HBA recovery state machine, "
3852 "state 2 starting...\n");
3854 /* Wait about 30 seconds before starting the second init */
3855 for (wait = 0; wait < 30; wait++) {
3856 msleep(1000);
3859 if (megasas_transition_to_ready(instance, 1)) {
3860 dev_notice(&instance->pdev->dev, "adapter not ready\n");
3862 atomic_set(&instance->fw_reset_no_pci_access, 1);
3863 megaraid_sas_kill_hba(instance);
3864 return ;
3867 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3868 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3869 (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3871 *instance->consumer = *instance->producer;
3872 } else {
3873 *instance->consumer = 0;
3874 *instance->producer = 0;
3877 megasas_issue_init_mfi(instance);
3879 spin_lock_irqsave(&instance->hba_lock, flags);
3880 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3881 spin_unlock_irqrestore(&instance->hba_lock, flags);
3882 instance->instancet->enable_intr(instance);
3884 megasas_issue_pending_cmds_again(instance);
3885 instance->issuepend_done = 1;
3890 * megasas_deplete_reply_queue - Processes all completed commands
3891 * @instance: Adapter soft state
3892 * @alt_status: Alternate status to be returned to
3893 * SCSI mid-layer instead of the status
3894 * returned by the FW
3895 * Note: this must be called with hba lock held
3897 static int
3898 megasas_deplete_reply_queue(struct megasas_instance *instance,
3899 u8 alt_status)
3901 u32 mfiStatus;
3902 u32 fw_state;
3904 if ((mfiStatus = instance->instancet->check_reset(instance,
3905 instance->reg_set)) == 1) {
3906 return IRQ_HANDLED;
3909 mfiStatus = instance->instancet->clear_intr(instance);
3910 if (mfiStatus == 0) {
3911 /* Hardware may not set outbound_intr_status in MSI-X mode */
3912 if (!instance->msix_vectors)
3913 return IRQ_NONE;
3916 instance->mfiStatus = mfiStatus;
3918 if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
3919 fw_state = instance->instancet->read_fw_status_reg(
3920 instance) & MFI_STATE_MASK;
3922 if (fw_state != MFI_STATE_FAULT) {
3923 dev_notice(&instance->pdev->dev, "fw state:%x\n",
3924 fw_state);
3927 if ((fw_state == MFI_STATE_FAULT) &&
3928 (instance->disableOnlineCtrlReset == 0)) {
3929 dev_notice(&instance->pdev->dev, "wait adp restart\n");
3931 if ((instance->pdev->device ==
3932 PCI_DEVICE_ID_LSI_SAS1064R) ||
3933 (instance->pdev->device ==
3934 PCI_DEVICE_ID_DELL_PERC5) ||
3935 (instance->pdev->device ==
3936 PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
3938 *instance->consumer =
3939 cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
3943 instance->instancet->disable_intr(instance);
3944 atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
3945 instance->issuepend_done = 0;
3947 atomic_set(&instance->fw_outstanding, 0);
3948 megasas_internal_reset_defer_cmds(instance);
3950 dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
3951 fw_state, atomic_read(&instance->adprecovery));
3953 schedule_work(&instance->work_init);
3954 return IRQ_HANDLED;
3956 } else {
3957 dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
3958 fw_state, instance->disableOnlineCtrlReset);
3962 tasklet_schedule(&instance->isr_tasklet);
3963 return IRQ_HANDLED;
3966 * megasas_isr - isr entry point
3968 static irqreturn_t megasas_isr(int irq, void *devp)
3970 struct megasas_irq_context *irq_context = devp;
3971 struct megasas_instance *instance = irq_context->instance;
3972 unsigned long flags;
3973 irqreturn_t rc;
3975 if (atomic_read(&instance->fw_reset_no_pci_access))
3976 return IRQ_HANDLED;
3978 spin_lock_irqsave(&instance->hba_lock, flags);
3979 rc = megasas_deplete_reply_queue(instance, DID_OK);
3980 spin_unlock_irqrestore(&instance->hba_lock, flags);
3982 return rc;
3986 * megasas_transition_to_ready - Move the FW to READY state
3987 * @instance: Adapter soft state
3989 * During initialization, the FW can potentially be in any one of several
3990 * possible states. If the FW is in the operational or waiting-for-handshake
3991 * states, the driver must take steps to bring it to the ready state.
3992 * Otherwise, it has to wait for the ready state.
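/*
 * Illustrative sketch of the loop below (not literal driver code):
 *
 *	abs_state = read_fw_status_reg(instance);
 *	fw_state  = abs_state & MFI_STATE_MASK;
 *	while (fw_state != MFI_STATE_READY) {
 *		<nudge the FW via the doorbell, depending on fw_state>;
 *		<poll up to max_wait seconds for abs_state to change>;
 *		if (abs_state did not change) return -ENODEV;
 *	}
 */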
3995 megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3997 int i;
3998 u8 max_wait;
3999 u32 fw_state;
4000 u32 abs_state, curr_abs_state;
4002 abs_state = instance->instancet->read_fw_status_reg(instance);
4003 fw_state = abs_state & MFI_STATE_MASK;
4005 if (fw_state != MFI_STATE_READY)
4006 dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
4007 " state\n");
4009 while (fw_state != MFI_STATE_READY) {
4011 switch (fw_state) {
4013 case MFI_STATE_FAULT:
4014 dev_printk(KERN_ERR, &instance->pdev->dev,
4015 "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
4016 abs_state & MFI_STATE_FAULT_CODE,
4017 abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4018 if (ocr) {
4019 max_wait = MEGASAS_RESET_WAIT_TIME;
4020 break;
4021 } else {
4022 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4023 megasas_dump_reg_set(instance->reg_set);
4024 return -ENODEV;
4027 case MFI_STATE_WAIT_HANDSHAKE:
4029 * Set the CLR bit in inbound doorbell
4031 if ((instance->pdev->device ==
4032 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4033 (instance->pdev->device ==
4034 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4035 (instance->adapter_type != MFI_SERIES))
4036 writel(
4037 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4038 &instance->reg_set->doorbell);
4039 else
4040 writel(
4041 MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4042 &instance->reg_set->inbound_doorbell);
4044 max_wait = MEGASAS_RESET_WAIT_TIME;
4045 break;
4047 case MFI_STATE_BOOT_MESSAGE_PENDING:
4048 if ((instance->pdev->device ==
4049 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4050 (instance->pdev->device ==
4051 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4052 (instance->adapter_type != MFI_SERIES))
4053 writel(MFI_INIT_HOTPLUG,
4054 &instance->reg_set->doorbell);
4055 else
4056 writel(MFI_INIT_HOTPLUG,
4057 &instance->reg_set->inbound_doorbell);
4059 max_wait = MEGASAS_RESET_WAIT_TIME;
4060 break;
4062 case MFI_STATE_OPERATIONAL:
4064 * Bring it to READY state; assuming max wait 10 secs
4066 instance->instancet->disable_intr(instance);
4067 if ((instance->pdev->device ==
4068 PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4069 (instance->pdev->device ==
4070 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4071 (instance->adapter_type != MFI_SERIES)) {
4072 writel(MFI_RESET_FLAGS,
4073 &instance->reg_set->doorbell);
4075 if (instance->adapter_type != MFI_SERIES) {
4076 for (i = 0; i < (10 * 1000); i += 20) {
4077 if (megasas_readl(
4078 instance,
4079 &instance->
4080 reg_set->
4081 doorbell) & 1)
4082 msleep(20);
4083 else
4084 break;
4087 } else
4088 writel(MFI_RESET_FLAGS,
4089 &instance->reg_set->inbound_doorbell);
4091 max_wait = MEGASAS_RESET_WAIT_TIME;
4092 break;
4094 case MFI_STATE_UNDEFINED:
4096 * This state should not last for more than 2 seconds
4098 max_wait = MEGASAS_RESET_WAIT_TIME;
4099 break;
4101 case MFI_STATE_BB_INIT:
4102 max_wait = MEGASAS_RESET_WAIT_TIME;
4103 break;
4105 case MFI_STATE_FW_INIT:
4106 max_wait = MEGASAS_RESET_WAIT_TIME;
4107 break;
4109 case MFI_STATE_FW_INIT_2:
4110 max_wait = MEGASAS_RESET_WAIT_TIME;
4111 break;
4113 case MFI_STATE_DEVICE_SCAN:
4114 max_wait = MEGASAS_RESET_WAIT_TIME;
4115 break;
4117 case MFI_STATE_FLUSH_CACHE:
4118 max_wait = MEGASAS_RESET_WAIT_TIME;
4119 break;
4121 default:
4122 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4123 fw_state);
4124 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4125 megasas_dump_reg_set(instance->reg_set);
4126 return -ENODEV;
4130 * The cur_state should not last for more than max_wait secs
4132 for (i = 0; i < max_wait * 50; i++) {
4133 curr_abs_state = instance->instancet->
4134 read_fw_status_reg(instance);
4136 if (abs_state == curr_abs_state) {
4137 msleep(20);
4138 } else
4139 break;
4143 * Return error if fw_state hasn't changed after max_wait
4145 if (curr_abs_state == abs_state) {
4146 dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4147 "in %d secs\n", fw_state, max_wait);
4148 dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4149 megasas_dump_reg_set(instance->reg_set);
4150 return -ENODEV;
4153 abs_state = curr_abs_state;
4154 fw_state = curr_abs_state & MFI_STATE_MASK;
4156 dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4158 return 0;
4162 * megasas_teardown_frame_pool - Destroy the cmd frame DMA pool
4163 * @instance: Adapter soft state
4165 static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4167 int i;
4168 u16 max_cmd = instance->max_mfi_cmds;
4169 struct megasas_cmd *cmd;
4171 if (!instance->frame_dma_pool)
4172 return;
4175 * Return all frames to pool
4177 for (i = 0; i < max_cmd; i++) {
4179 cmd = instance->cmd_list[i];
4181 if (cmd->frame)
4182 dma_pool_free(instance->frame_dma_pool, cmd->frame,
4183 cmd->frame_phys_addr);
4185 if (cmd->sense)
4186 dma_pool_free(instance->sense_dma_pool, cmd->sense,
4187 cmd->sense_phys_addr);
4191 * Now destroy the pool itself
4193 dma_pool_destroy(instance->frame_dma_pool);
4194 dma_pool_destroy(instance->sense_dma_pool);
4196 instance->frame_dma_pool = NULL;
4197 instance->sense_dma_pool = NULL;
4201 * megasas_create_frame_pool - Creates DMA pool for cmd frames
4202 * @instance: Adapter soft state
4204 * Each command packet has an embedded DMA memory buffer that is used for
4205 * filling the MFI frame and the SG list that immediately follows the frame.
4206 * This function creates those DMA memory buffers for each command packet
4207 * using the PCI DMA pool facility.
4209 static int megasas_create_frame_pool(struct megasas_instance *instance)
4211 int i;
4212 u16 max_cmd;
4213 u32 frame_count;
4214 struct megasas_cmd *cmd;
4216 max_cmd = instance->max_mfi_cmds;
4219 * For MFI controllers:
4220 * max_num_sge = 60
4221 * max_sge_sz = 16 bytes (sizeof megasas_sge_skinny)
4222 * Total 960 bytes (15 MFI frames of 64 bytes)
4224 * Fusion adapters require only 3 extra frames:
4225 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4226 * max_sge_sz = 12 bytes (sizeof megasas_sge64)
4227 * Total 192 bytes (3 MFI frames of 64 bytes)
4229 frame_count = (instance->adapter_type == MFI_SERIES) ?
4230 (15 + 1) : (3 + 1);
4231 instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
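/*
 * Worked example of the sizing above (assuming MEGAMFI_FRAME_SIZE is the
 * 64-byte frame mentioned in the comment): MFI series commands get
 * (15 + 1) * 64 = 1024 bytes, Fusion commands get (3 + 1) * 64 = 256 bytes.
 * The "+ 1" is the base MFI frame that precedes the SG list space.
 */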
4233 * Use DMA pool facility provided by PCI layer
4235 instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4236 &instance->pdev->dev,
4237 instance->mfi_frame_size, 256, 0);
4239 if (!instance->frame_dma_pool) {
4240 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4241 return -ENOMEM;
4244 instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4245 &instance->pdev->dev, 128,
4246 4, 0);
4248 if (!instance->sense_dma_pool) {
4249 dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4251 dma_pool_destroy(instance->frame_dma_pool);
4252 instance->frame_dma_pool = NULL;
4254 return -ENOMEM;
4258 * Allocate and attach a frame to each of the commands in cmd_list.
4259 * By using cmd->index as the context instead of &cmd, we can
4260 * always use a 32-bit context regardless of the architecture.
4262 for (i = 0; i < max_cmd; i++) {
4264 cmd = instance->cmd_list[i];
4266 cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4267 GFP_KERNEL, &cmd->frame_phys_addr);
4269 cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4270 GFP_KERNEL, &cmd->sense_phys_addr);
4273 * megasas_teardown_frame_pool() takes care of freeing
4274 * whatever has been allocated
4276 if (!cmd->frame || !cmd->sense) {
4277 dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4278 megasas_teardown_frame_pool(instance);
4279 return -ENOMEM;
4282 cmd->frame->io.context = cpu_to_le32(cmd->index);
4283 cmd->frame->io.pad_0 = 0;
4284 if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4285 cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4288 return 0;
4292 * megasas_free_cmds - Free all the cmds in the free cmd pool
4293 * @instance: Adapter soft state
4295 void megasas_free_cmds(struct megasas_instance *instance)
4297 int i;
4299 /* First free the MFI frame pool */
4300 megasas_teardown_frame_pool(instance);
4302 /* Free all the commands in the cmd_list */
4303 for (i = 0; i < instance->max_mfi_cmds; i++)
4305 kfree(instance->cmd_list[i]);
4307 /* Free the cmd_list buffer itself */
4308 kfree(instance->cmd_list);
4309 instance->cmd_list = NULL;
4311 INIT_LIST_HEAD(&instance->cmd_pool);
4315 * megasas_alloc_cmds - Allocates the command packets
4316 * @instance: Adapter soft state
4318 * Each command that is issued to the FW, whether an IO command from the OS
4319 * or an internal command like an IOCTL, is wrapped in a local data structure
4320 * called megasas_cmd. The frame embedded in this megasas_cmd is what is
4321 * actually issued to the FW.
4323 * Each frame has a 32-bit field called context (tag). This context is used
4324 * to get back the megasas_cmd from the frame when a frame gets completed in
4325 * the ISR. Typically the address of the megasas_cmd itself would be used as
4326 * the context. But we wanted to keep the differences between 32 and 64 bit
4327 * systems to the minimum. We always use 32 bit integers for the context. In
4328 * this driver, the 32 bit values are the indices into an array cmd_list.
4329 * This array is used only to look up the megasas_cmd given the context. The
4330 * free commands themselves are maintained in a linked list called cmd_pool.
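/*
 * Illustrative sketch of the lookup described above (not literal driver
 * code): on completion the ISR can recover the command with
 *
 *	cmd = instance->cmd_list[le32_to_cpu(frame->io.context)];
 *
 * which is why megasas_create_frame_pool() stores cmd->index in
 * frame->io.context for every command.
 */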
4332 int megasas_alloc_cmds(struct megasas_instance *instance)
4334 int i;
4335 int j;
4336 u16 max_cmd;
4337 struct megasas_cmd *cmd;
4339 max_cmd = instance->max_mfi_cmds;
4342 * instance->cmd_list is an array of struct megasas_cmd pointers.
4343 * Allocate the dynamic array first and then allocate individual
4344 * commands.
4346 instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
4348 if (!instance->cmd_list) {
4349 dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4350 return -ENOMEM;
4353 memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
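/* Note: kcalloc() above already zero-fills the array, so this memset is redundant. */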
4355 for (i = 0; i < max_cmd; i++) {
4356 instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4357 GFP_KERNEL);
4359 if (!instance->cmd_list[i]) {
4361 for (j = 0; j < i; j++)
4362 kfree(instance->cmd_list[j]);
4364 kfree(instance->cmd_list);
4365 instance->cmd_list = NULL;
4367 return -ENOMEM;
4371 for (i = 0; i < max_cmd; i++) {
4372 cmd = instance->cmd_list[i];
4373 memset(cmd, 0, sizeof(struct megasas_cmd));
4374 cmd->index = i;
4375 cmd->scmd = NULL;
4376 cmd->instance = instance;
4378 list_add_tail(&cmd->list, &instance->cmd_pool);
4382 * Create a frame pool and assign one frame to each cmd
4384 if (megasas_create_frame_pool(instance)) {
4385 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4386 megasas_free_cmds(instance);
4387 return -ENOMEM;
4390 return 0;
4394 * dcmd_timeout_ocr_possible - Check if OCR is possible based on Driver/FW state.
4395 * @instance: Adapter soft state
4397 * Return INITIATE_OCR only for Fusion adapters when driver load/unload is not in
4398 * progress and FW is not already under OCR; KILL_ADAPTER or IGNORE_TIMEOUT otherwise.
4400 inline int
4401 dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4403 if (instance->adapter_type == MFI_SERIES)
4404 return KILL_ADAPTER;
4405 else if (instance->unload ||
4406 test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
4407 &instance->reset_flags))
4408 return IGNORE_TIMEOUT;
4409 else
4410 return INITIATE_OCR;
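/*
 * Callers typically act on the return value as in the DCMD_TIMEOUT handling
 * throughout this file, roughly:
 *
 *	switch (dcmd_timeout_ocr_possible(instance)) {
 *	case INITIATE_OCR:   <trigger megasas_reset_fusion()>; break;
 *	case KILL_ADAPTER:   megaraid_sas_kill_hba(instance); break;
 *	case IGNORE_TIMEOUT: <log and continue>; break;
 *	}
 */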
4413 static void
4414 megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4416 int ret;
4417 struct megasas_cmd *cmd;
4418 struct megasas_dcmd_frame *dcmd;
4420 struct MR_PRIV_DEVICE *mr_device_priv_data;
4421 u16 device_id = 0;
4423 device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4424 cmd = megasas_get_cmd(instance);
4426 if (!cmd) {
4427 dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4428 return;
4431 dcmd = &cmd->frame->dcmd;
4433 memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4434 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4436 dcmd->mbox.s[0] = cpu_to_le16(device_id);
4437 dcmd->cmd = MFI_CMD_DCMD;
4438 dcmd->cmd_status = 0xFF;
4439 dcmd->sge_count = 1;
4440 dcmd->flags = MFI_FRAME_DIR_READ;
4441 dcmd->timeout = 0;
4442 dcmd->pad_0 = 0;
4443 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4444 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4446 megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4447 sizeof(struct MR_PD_INFO));
4449 if ((instance->adapter_type != MFI_SERIES) &&
4450 !instance->mask_interrupts)
4451 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4452 else
4453 ret = megasas_issue_polled(instance, cmd);
4455 switch (ret) {
4456 case DCMD_SUCCESS:
4457 mr_device_priv_data = sdev->hostdata;
4458 le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4459 mr_device_priv_data->interface_type =
4460 instance->pd_info->state.ddf.pdType.intf;
4461 break;
4463 case DCMD_TIMEOUT:
4465 switch (dcmd_timeout_ocr_possible(instance)) {
4466 case INITIATE_OCR:
4467 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4468 mutex_unlock(&instance->reset_mutex);
4469 megasas_reset_fusion(instance->host,
4470 MFI_IO_TIMEOUT_OCR);
4471 mutex_lock(&instance->reset_mutex);
4472 break;
4473 case KILL_ADAPTER:
4474 megaraid_sas_kill_hba(instance);
4475 break;
4476 case IGNORE_TIMEOUT:
4477 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4478 __func__, __LINE__);
4479 break;
4482 break;
4485 if (ret != DCMD_TIMEOUT)
4486 megasas_return_cmd(instance, cmd);
4488 return;
4491 * megasas_get_pd_list - Returns FW's pd_list structure
4492 * @instance: Adapter soft state
4495 * Issues an internal command (DCMD) to get the FW's controller PD
4496 * list structure. This information is mainly used to find out the
4497 * system PDs exposed by the FW.
4499 static int
4500 megasas_get_pd_list(struct megasas_instance *instance)
4502 int ret = 0, pd_index = 0;
4503 struct megasas_cmd *cmd;
4504 struct megasas_dcmd_frame *dcmd;
4505 struct MR_PD_LIST *ci;
4506 struct MR_PD_ADDRESS *pd_addr;
4508 if (instance->pd_list_not_supported) {
4509 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4510 "not supported by firmware\n");
4511 return ret;
4514 ci = instance->pd_list_buf;
4516 cmd = megasas_get_cmd(instance);
4518 if (!cmd) {
4519 dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4520 return -ENOMEM;
4523 dcmd = &cmd->frame->dcmd;
4525 memset(ci, 0, sizeof(*ci));
4526 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4528 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4529 dcmd->mbox.b[1] = 0;
4530 dcmd->cmd = MFI_CMD_DCMD;
4531 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4532 dcmd->sge_count = 1;
4533 dcmd->flags = MFI_FRAME_DIR_READ;
4534 dcmd->timeout = 0;
4535 dcmd->pad_0 = 0;
4536 dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4537 dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4539 megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4540 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4542 if ((instance->adapter_type != MFI_SERIES) &&
4543 !instance->mask_interrupts)
4544 ret = megasas_issue_blocked_cmd(instance, cmd,
4545 MFI_IO_TIMEOUT_SECS);
4546 else
4547 ret = megasas_issue_polled(instance, cmd);
4549 switch (ret) {
4550 case DCMD_FAILED:
4551 dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4552 "failed/not supported by firmware\n");
4554 if (instance->adapter_type != MFI_SERIES)
4555 megaraid_sas_kill_hba(instance);
4556 else
4557 instance->pd_list_not_supported = 1;
4558 break;
4559 case DCMD_TIMEOUT:
4561 switch (dcmd_timeout_ocr_possible(instance)) {
4562 case INITIATE_OCR:
4563 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4565 * DCMD failed from AEN path.
4566 * AEN path already holds reset_mutex to avoid PCI access
4567 * while OCR is in progress.
4569 mutex_unlock(&instance->reset_mutex);
4570 megasas_reset_fusion(instance->host,
4571 MFI_IO_TIMEOUT_OCR);
4572 mutex_lock(&instance->reset_mutex);
4573 break;
4574 case KILL_ADAPTER:
4575 megaraid_sas_kill_hba(instance);
4576 break;
4577 case IGNORE_TIMEOUT:
4578 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4579 __func__, __LINE__);
4580 break;
4583 break;
4585 case DCMD_SUCCESS:
4586 pd_addr = ci->addr;
4587 if (megasas_dbg_lvl & LD_PD_DEBUG)
4588 dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4589 __func__, le32_to_cpu(ci->count));
4591 if ((le32_to_cpu(ci->count) >
4592 (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4593 break;
4595 memset(instance->local_pd_list, 0,
4596 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4598 for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4599 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid =
4600 le16_to_cpu(pd_addr->deviceId);
4601 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType =
4602 pd_addr->scsiDevType;
4603 instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState =
4604 MR_PD_STATE_SYSTEM;
4605 if (megasas_dbg_lvl & LD_PD_DEBUG)
4606 dev_info(&instance->pdev->dev,
4607 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4608 pd_index, le16_to_cpu(pd_addr->deviceId),
4609 pd_addr->scsiDevType);
4610 pd_addr++;
4613 memcpy(instance->pd_list, instance->local_pd_list,
4614 sizeof(instance->pd_list));
4615 break;
4619 if (ret != DCMD_TIMEOUT)
4620 megasas_return_cmd(instance, cmd);
4622 return ret;
4626 * megasas_get_ld_list - Returns FW's ld_list structure
4627 * @instance: Adapter soft state
4630 * Issues an internal command (DCMD) to get the FW's controller LD
4631 * list structure. This information is mainly used to find out the
4632 * LDs exported by the FW.
4634 static int
4635 megasas_get_ld_list(struct megasas_instance *instance)
4637 int ret = 0, ld_index = 0, ids = 0;
4638 struct megasas_cmd *cmd;
4639 struct megasas_dcmd_frame *dcmd;
4640 struct MR_LD_LIST *ci;
4641 dma_addr_t ci_h = 0;
4642 u32 ld_count;
4644 ci = instance->ld_list_buf;
4645 ci_h = instance->ld_list_buf_h;
4647 cmd = megasas_get_cmd(instance);
4649 if (!cmd) {
4650 dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4651 return -ENOMEM;
4654 dcmd = &cmd->frame->dcmd;
4656 memset(ci, 0, sizeof(*ci));
4657 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4659 if (instance->supportmax256vd)
4660 dcmd->mbox.b[0] = 1;
4661 dcmd->cmd = MFI_CMD_DCMD;
4662 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4663 dcmd->sge_count = 1;
4664 dcmd->flags = MFI_FRAME_DIR_READ;
4665 dcmd->timeout = 0;
4666 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4667 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4668 dcmd->pad_0 = 0;
4670 megasas_set_dma_settings(instance, dcmd, ci_h,
4671 sizeof(struct MR_LD_LIST));
4673 if ((instance->adapter_type != MFI_SERIES) &&
4674 !instance->mask_interrupts)
4675 ret = megasas_issue_blocked_cmd(instance, cmd,
4676 MFI_IO_TIMEOUT_SECS);
4677 else
4678 ret = megasas_issue_polled(instance, cmd);
4680 ld_count = le32_to_cpu(ci->ldCount);
4682 switch (ret) {
4683 case DCMD_FAILED:
4684 megaraid_sas_kill_hba(instance);
4685 break;
4686 case DCMD_TIMEOUT:
4688 switch (dcmd_timeout_ocr_possible(instance)) {
4689 case INITIATE_OCR:
4690 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4692 * DCMD failed from AEN path.
4693 * AEN path already holds reset_mutex to avoid PCI access
4694 * while OCR is in progress.
4696 mutex_unlock(&instance->reset_mutex);
4697 megasas_reset_fusion(instance->host,
4698 MFI_IO_TIMEOUT_OCR);
4699 mutex_lock(&instance->reset_mutex);
4700 break;
4701 case KILL_ADAPTER:
4702 megaraid_sas_kill_hba(instance);
4703 break;
4704 case IGNORE_TIMEOUT:
4705 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4706 __func__, __LINE__);
4707 break;
4710 break;
4712 case DCMD_SUCCESS:
4713 if (megasas_dbg_lvl & LD_PD_DEBUG)
4714 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4715 __func__, ld_count);
4717 if (ld_count > instance->fw_supported_vd_count)
4718 break;
4720 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4722 for (ld_index = 0; ld_index < ld_count; ld_index++) {
4723 if (ci->ldList[ld_index].state != 0) {
4724 ids = ci->ldList[ld_index].ref.targetId;
4725 instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4726 if (megasas_dbg_lvl & LD_PD_DEBUG)
4727 dev_info(&instance->pdev->dev,
4728 "LD%d: targetID: 0x%03x\n",
4729 ld_index, ids);
4733 break;
4736 if (ret != DCMD_TIMEOUT)
4737 megasas_return_cmd(instance, cmd);
4739 return ret;
4743 * megasas_ld_list_query - Returns FW's LD target ID list
4744 * @instance: Adapter soft state
4745 * @query_type: query type (e.g. MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)
4747 * Issues an internal command (DCMD) to get the FW's LD target ID
4748 * list. This information is mainly used to find out the LDs
4749 * exposed to the host by the FW.
4751 static int
4752 megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4754 int ret = 0, ld_index = 0, ids = 0;
4755 struct megasas_cmd *cmd;
4756 struct megasas_dcmd_frame *dcmd;
4757 struct MR_LD_TARGETID_LIST *ci;
4758 dma_addr_t ci_h = 0;
4759 u32 tgtid_count;
4761 ci = instance->ld_targetid_list_buf;
4762 ci_h = instance->ld_targetid_list_buf_h;
4764 cmd = megasas_get_cmd(instance);
4766 if (!cmd) {
4767 dev_warn(&instance->pdev->dev,
4768 "megasas_ld_list_query: Failed to get cmd\n");
4769 return -ENOMEM;
4772 dcmd = &cmd->frame->dcmd;
4774 memset(ci, 0, sizeof(*ci));
4775 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4777 dcmd->mbox.b[0] = query_type;
4778 if (instance->supportmax256vd)
4779 dcmd->mbox.b[2] = 1;
4781 dcmd->cmd = MFI_CMD_DCMD;
4782 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4783 dcmd->sge_count = 1;
4784 dcmd->flags = MFI_FRAME_DIR_READ;
4785 dcmd->timeout = 0;
4786 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4787 dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4788 dcmd->pad_0 = 0;
4790 megasas_set_dma_settings(instance, dcmd, ci_h,
4791 sizeof(struct MR_LD_TARGETID_LIST));
4793 if ((instance->adapter_type != MFI_SERIES) &&
4794 !instance->mask_interrupts)
4795 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4796 else
4797 ret = megasas_issue_polled(instance, cmd);
4799 switch (ret) {
4800 case DCMD_FAILED:
4801 dev_info(&instance->pdev->dev,
4802 "DCMD not supported by firmware - %s %d\n",
4803 __func__, __LINE__);
4804 ret = megasas_get_ld_list(instance);
4805 break;
4806 case DCMD_TIMEOUT:
4807 switch (dcmd_timeout_ocr_possible(instance)) {
4808 case INITIATE_OCR:
4809 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4811 * DCMD failed from AEN path.
4812 * AEN path already holds reset_mutex to avoid PCI access
4813 * while OCR is in progress.
4815 mutex_unlock(&instance->reset_mutex);
4816 megasas_reset_fusion(instance->host,
4817 MFI_IO_TIMEOUT_OCR);
4818 mutex_lock(&instance->reset_mutex);
4819 break;
4820 case KILL_ADAPTER:
4821 megaraid_sas_kill_hba(instance);
4822 break;
4823 case IGNORE_TIMEOUT:
4824 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4825 __func__, __LINE__);
4826 break;
4829 break;
4830 case DCMD_SUCCESS:
4831 tgtid_count = le32_to_cpu(ci->count);
4833 if (megasas_dbg_lvl & LD_PD_DEBUG)
4834 dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4835 __func__, tgtid_count);
4837 if ((tgtid_count > (instance->fw_supported_vd_count)))
4838 break;
4840 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4841 for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4842 ids = ci->targetId[ld_index];
4843 instance->ld_ids[ids] = ci->targetId[ld_index];
4844 if (megasas_dbg_lvl & LD_PD_DEBUG)
4845 dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4846 ld_index, ci->targetId[ld_index]);
4849 break;
4852 if (ret != DCMD_TIMEOUT)
4853 megasas_return_cmd(instance, cmd);
4855 return ret;
4859 * dcmd.opcode - MR_DCMD_CTRL_DEVICE_LIST_GET
4860 * dcmd.mbox - reserved
4861 * dcmd.sge IN - ptr to return MR_HOST_DEVICE_LIST structure
4862 * Desc: This DCMD will return the combined device list
4863 * Status: MFI_STAT_OK - List returned successfully
4864 * MFI_STAT_INVALID_CMD - Firmware support for the feature has been
4865 * disabled
4866 * @instance: Adapter soft state
4867 * @is_probe: Driver probe check
4868 * Return: 0 if DCMD succeeded
4869 * non-zero if failed
4871 static int
4872 megasas_host_device_list_query(struct megasas_instance *instance,
4873 bool is_probe)
4875 int ret, i, target_id;
4876 struct megasas_cmd *cmd;
4877 struct megasas_dcmd_frame *dcmd;
4878 struct MR_HOST_DEVICE_LIST *ci;
4879 u32 count;
4880 dma_addr_t ci_h;
4882 ci = instance->host_device_list_buf;
4883 ci_h = instance->host_device_list_buf_h;
4885 cmd = megasas_get_cmd(instance);
4887 if (!cmd) {
4888 dev_warn(&instance->pdev->dev,
4889 "%s: failed to get cmd\n",
4890 __func__);
4891 return -ENOMEM;
4894 dcmd = &cmd->frame->dcmd;
4896 memset(ci, 0, sizeof(*ci));
4897 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4899 dcmd->mbox.b[0] = is_probe ? 0 : 1;
4900 dcmd->cmd = MFI_CMD_DCMD;
4901 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4902 dcmd->sge_count = 1;
4903 dcmd->flags = MFI_FRAME_DIR_READ;
4904 dcmd->timeout = 0;
4905 dcmd->pad_0 = 0;
4906 dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4907 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4909 megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4911 if (!instance->mask_interrupts) {
4912 ret = megasas_issue_blocked_cmd(instance, cmd,
4913 MFI_IO_TIMEOUT_SECS);
4914 } else {
4915 ret = megasas_issue_polled(instance, cmd);
4916 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4919 switch (ret) {
4920 case DCMD_SUCCESS:
4921 /* Fill the internal pd_list and ld_ids array based on
4922 * targetIds returned by FW
4924 count = le32_to_cpu(ci->count);
4926 if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
4927 break;
4929 if (megasas_dbg_lvl & LD_PD_DEBUG)
4930 dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
4931 __func__, count);
4933 memset(instance->local_pd_list, 0,
4934 MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4935 memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4936 for (i = 0; i < count; i++) {
4937 target_id = le16_to_cpu(ci->host_device_list[i].target_id);
4938 if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
4939 instance->local_pd_list[target_id].tid = target_id;
4940 instance->local_pd_list[target_id].driveType =
4941 ci->host_device_list[i].scsi_type;
4942 instance->local_pd_list[target_id].driveState =
4943 MR_PD_STATE_SYSTEM;
4944 if (megasas_dbg_lvl & LD_PD_DEBUG)
4945 dev_info(&instance->pdev->dev,
4946 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
4947 i, target_id, ci->host_device_list[i].scsi_type);
4948 } else {
4949 instance->ld_ids[target_id] = target_id;
4950 if (megasas_dbg_lvl & LD_PD_DEBUG)
4951 dev_info(&instance->pdev->dev,
4952 "Device %d: LD targetID: 0x%03x\n",
4953 i, target_id);
4957 memcpy(instance->pd_list, instance->local_pd_list,
4958 sizeof(instance->pd_list));
4959 break;
4961 case DCMD_TIMEOUT:
4962 switch (dcmd_timeout_ocr_possible(instance)) {
4963 case INITIATE_OCR:
4964 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4965 mutex_unlock(&instance->reset_mutex);
4966 megasas_reset_fusion(instance->host,
4967 MFI_IO_TIMEOUT_OCR);
4968 mutex_lock(&instance->reset_mutex);
4969 break;
4970 case KILL_ADAPTER:
4971 megaraid_sas_kill_hba(instance);
4972 break;
4973 case IGNORE_TIMEOUT:
4974 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4975 __func__, __LINE__);
4976 break;
4978 break;
4979 case DCMD_FAILED:
4980 dev_err(&instance->pdev->dev,
4981 "%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
4982 __func__);
4983 break;
4986 if (ret != DCMD_TIMEOUT)
4987 megasas_return_cmd(instance, cmd);
4989 return ret;
4993 * megasas_update_ext_vd_details : Update details w.r.t Extended VD
4994 * instance : Controller's instance
4996 static void megasas_update_ext_vd_details(struct megasas_instance *instance)
4998 struct fusion_context *fusion;
4999 u32 ventura_map_sz = 0;
5001 fusion = instance->ctrl_context;
5002 /* For MFI based controllers there is nothing to update */
5003 if (!fusion)
5004 return;
5006 instance->supportmax256vd =
5007 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
5008 /* Below is additional check to address future FW enhancement */
5009 if (instance->ctrl_info_buf->max_lds > 64)
5010 instance->supportmax256vd = 1;
5012 instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
5013 * MEGASAS_MAX_DEV_PER_CHANNEL;
5014 instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
5015 * MEGASAS_MAX_DEV_PER_CHANNEL;
5016 if (instance->supportmax256vd) {
5017 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
5018 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5019 } else {
5020 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5021 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5024 dev_info(&instance->pdev->dev,
5025 "FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
5026 instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
5027 instance->ctrl_info_buf->max_lds);
5029 if (instance->max_raid_mapsize) {
5030 ventura_map_sz = instance->max_raid_mapsize *
5031 MR_MIN_MAP_SIZE; /* 64k */
5032 fusion->current_map_sz = ventura_map_sz;
5033 fusion->max_map_sz = ventura_map_sz;
5034 } else {
5035 fusion->old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
5036 (sizeof(struct MR_LD_SPAN_MAP) *
5037 (instance->fw_supported_vd_count - 1));
5038 fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
5040 fusion->max_map_sz =
5041 max(fusion->old_map_sz, fusion->new_map_sz);
5043 if (instance->supportmax256vd)
5044 fusion->current_map_sz = fusion->new_map_sz;
5045 else
5046 fusion->current_map_sz = fusion->old_map_sz;
5048 /* irrespective of FW raid maps, driver raid map is constant */
5049 fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
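/*
 * Summary of the sizing above: controllers that report a non-zero
 * max_raid_mapsize use max_raid_mapsize * MR_MIN_MAP_SIZE for the FW RAID
 * map (ventura_map_sz); older Fusion controllers pick between the legacy
 * and extended map layouts based on supportmax256vd. The driver-side RAID
 * map size is constant either way.
 */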
5053 * dcmd.opcode - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
5054 * dcmd.hdr.length - number of bytes to read
5055 * dcmd.sge - Ptr to MR_SNAPDUMP_PROPERTIES
5056 * Desc: Fill in snapdump properties
5057 * Status: MFI_STAT_OK- Command successful
5059 void megasas_get_snapdump_properties(struct megasas_instance *instance)
5061 int ret = 0;
5062 struct megasas_cmd *cmd;
5063 struct megasas_dcmd_frame *dcmd;
5064 struct MR_SNAPDUMP_PROPERTIES *ci;
5065 dma_addr_t ci_h = 0;
5067 ci = instance->snapdump_prop;
5068 ci_h = instance->snapdump_prop_h;
5070 if (!ci)
5071 return;
5073 cmd = megasas_get_cmd(instance);
5075 if (!cmd) {
5076 dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
5077 return;
5080 dcmd = &cmd->frame->dcmd;
5082 memset(ci, 0, sizeof(*ci));
5083 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5085 dcmd->cmd = MFI_CMD_DCMD;
5086 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5087 dcmd->sge_count = 1;
5088 dcmd->flags = MFI_FRAME_DIR_READ;
5089 dcmd->timeout = 0;
5090 dcmd->pad_0 = 0;
5091 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5092 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5094 megasas_set_dma_settings(instance, dcmd, ci_h,
5095 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5097 if (!instance->mask_interrupts) {
5098 ret = megasas_issue_blocked_cmd(instance, cmd,
5099 MFI_IO_TIMEOUT_SECS);
5100 } else {
5101 ret = megasas_issue_polled(instance, cmd);
5102 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5105 switch (ret) {
5106 case DCMD_SUCCESS:
5107 instance->snapdump_wait_time =
5108 min_t(u8, ci->trigger_min_num_sec_before_ocr,
5109 MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5110 break;
5112 case DCMD_TIMEOUT:
5113 switch (dcmd_timeout_ocr_possible(instance)) {
5114 case INITIATE_OCR:
5115 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5116 mutex_unlock(&instance->reset_mutex);
5117 megasas_reset_fusion(instance->host,
5118 MFI_IO_TIMEOUT_OCR);
5119 mutex_lock(&instance->reset_mutex);
5120 break;
5121 case KILL_ADAPTER:
5122 megaraid_sas_kill_hba(instance);
5123 break;
5124 case IGNORE_TIMEOUT:
5125 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5126 __func__, __LINE__);
5127 break;
5131 if (ret != DCMD_TIMEOUT)
5132 megasas_return_cmd(instance, cmd);
5136 * megasas_get_ctrl_info - Returns FW's controller structure
5137 * @instance: Adapter soft state
5139 * Issues an internal command (DCMD) to get the FW's controller structure.
5140 * This information is mainly used to find out the maximum IO transfer per
5141 * command supported by the FW.
5144 megasas_get_ctrl_info(struct megasas_instance *instance)
5146 int ret = 0;
5147 struct megasas_cmd *cmd;
5148 struct megasas_dcmd_frame *dcmd;
5149 struct megasas_ctrl_info *ci;
5150 dma_addr_t ci_h = 0;
5152 ci = instance->ctrl_info_buf;
5153 ci_h = instance->ctrl_info_buf_h;
5155 cmd = megasas_get_cmd(instance);
5157 if (!cmd) {
5158 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5159 return -ENOMEM;
5162 dcmd = &cmd->frame->dcmd;
5164 memset(ci, 0, sizeof(*ci));
5165 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5167 dcmd->cmd = MFI_CMD_DCMD;
5168 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5169 dcmd->sge_count = 1;
5170 dcmd->flags = MFI_FRAME_DIR_READ;
5171 dcmd->timeout = 0;
5172 dcmd->pad_0 = 0;
5173 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5174 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5175 dcmd->mbox.b[0] = 1;
5177 megasas_set_dma_settings(instance, dcmd, ci_h,
5178 sizeof(struct megasas_ctrl_info));
5180 if ((instance->adapter_type != MFI_SERIES) &&
5181 !instance->mask_interrupts) {
5182 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5183 } else {
5184 ret = megasas_issue_polled(instance, cmd);
5185 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5188 switch (ret) {
5189 case DCMD_SUCCESS:
5190 /* Save required controller information in
5191 * CPU endianness format.
5193 le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5194 le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5195 le32_to_cpus((u32 *)&ci->adapterOperations2);
5196 le32_to_cpus((u32 *)&ci->adapterOperations3);
5197 le16_to_cpus((u16 *)&ci->adapter_operations4);
5198 le32_to_cpus((u32 *)&ci->adapter_operations5);
5200 /* Update the latest Ext VD info.
5201 * From the init path, store current firmware details.
5202 * From the OCR path, detect any firmware property changes,
5203 * e.g. in case of a firmware upgrade without a system reboot.
5205 megasas_update_ext_vd_details(instance);
5206 instance->support_seqnum_jbod_fp =
5207 ci->adapterOperations3.useSeqNumJbodFP;
5208 instance->support_morethan256jbod =
5209 ci->adapter_operations4.support_pd_map_target_id;
5210 instance->support_nvme_passthru =
5211 ci->adapter_operations4.support_nvme_passthru;
5212 instance->support_pci_lane_margining =
5213 ci->adapter_operations5.support_pci_lane_margining;
5214 instance->task_abort_tmo = ci->TaskAbortTO;
5215 instance->max_reset_tmo = ci->MaxResetTO;
5217 /* Check whether controller is iMR or MR */
5218 instance->is_imr = (ci->memory_size ? 0 : 1);
5220 instance->snapdump_wait_time =
5221 (ci->properties.on_off_properties2.enable_snap_dump ?
5222 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5224 instance->enable_fw_dev_list =
5225 ci->properties.on_off_properties2.enable_fw_dev_list;
5227 dev_info(&instance->pdev->dev,
5228 "controller type\t: %s(%dMB)\n",
5229 instance->is_imr ? "iMR" : "MR",
5230 le16_to_cpu(ci->memory_size));
5232 instance->disableOnlineCtrlReset =
5233 ci->properties.OnOffProperties.disableOnlineCtrlReset;
5234 instance->secure_jbod_support =
5235 ci->adapterOperations3.supportSecurityonJBOD;
5236 dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5237 instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5238 dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5239 instance->secure_jbod_support ? "Yes" : "No");
5240 dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5241 instance->support_nvme_passthru ? "Yes" : "No");
5242 dev_info(&instance->pdev->dev,
5243 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5244 instance->task_abort_tmo, instance->max_reset_tmo);
5245 dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5246 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5247 dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5248 instance->support_pci_lane_margining ? "Yes" : "No");
5250 break;
5252 case DCMD_TIMEOUT:
5253 switch (dcmd_timeout_ocr_possible(instance)) {
5254 case INITIATE_OCR:
5255 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5256 mutex_unlock(&instance->reset_mutex);
5257 megasas_reset_fusion(instance->host,
5258 MFI_IO_TIMEOUT_OCR);
5259 mutex_lock(&instance->reset_mutex);
5260 break;
5261 case KILL_ADAPTER:
5262 megaraid_sas_kill_hba(instance);
5263 break;
5264 case IGNORE_TIMEOUT:
5265 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5266 __func__, __LINE__);
5267 break;
5269 break;
5270 case DCMD_FAILED:
5271 megaraid_sas_kill_hba(instance);
5272 break;
5276 if (ret != DCMD_TIMEOUT)
5277 megasas_return_cmd(instance, cmd);
5279 return ret;
5283 * megasas_set_crash_dump_params - Sends address of crash dump DMA buffer
5284 * to firmware
5286 * @instance: Adapter soft state
5287 * @crash_buf_state - tell FW to turn ON/OFF crash dump feature:
5288 * MR_CRASH_BUF_TURN_OFF = 0
5289 * MR_CRASH_BUF_TURN_ON = 1
5290 * @return: 0 on success, non-zero on failure.
5291 * Issues an internal command (DCMD) to set parameters for crash dump feature.
5292 * Driver will send address of crash dump DMA buffer and set mbox to tell FW
5293 * that driver supports crash dump feature. This DCMD will be sent only if
5294 * crash dump feature is supported by the FW.
5297 int megasas_set_crash_dump_params(struct megasas_instance *instance,
5298 u8 crash_buf_state)
5300 int ret = 0;
5301 struct megasas_cmd *cmd;
5302 struct megasas_dcmd_frame *dcmd;
5304 cmd = megasas_get_cmd(instance);
5306 if (!cmd) {
5307 dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5308 return -ENOMEM;
5312 dcmd = &cmd->frame->dcmd;
5314 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5315 dcmd->mbox.b[0] = crash_buf_state;
5316 dcmd->cmd = MFI_CMD_DCMD;
5317 dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5318 dcmd->sge_count = 1;
5319 dcmd->flags = MFI_FRAME_DIR_NONE;
5320 dcmd->timeout = 0;
5321 dcmd->pad_0 = 0;
5322 dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5323 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5325 megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5326 CRASH_DMA_BUF_SIZE);
5328 if ((instance->adapter_type != MFI_SERIES) &&
5329 !instance->mask_interrupts)
5330 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5331 else
5332 ret = megasas_issue_polled(instance, cmd);
5334 if (ret == DCMD_TIMEOUT) {
5335 switch (dcmd_timeout_ocr_possible(instance)) {
5336 case INITIATE_OCR:
5337 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5338 megasas_reset_fusion(instance->host,
5339 MFI_IO_TIMEOUT_OCR);
5340 break;
5341 case KILL_ADAPTER:
5342 megaraid_sas_kill_hba(instance);
5343 break;
5344 case IGNORE_TIMEOUT:
5345 dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5346 __func__, __LINE__);
5347 break;
5349 } else
5350 megasas_return_cmd(instance, cmd);
5352 return ret;
5356 * megasas_issue_init_mfi - Initializes the FW
5357 * @instance: Adapter soft state
5359 * Issues the INIT MFI cmd
5361 static int
5362 megasas_issue_init_mfi(struct megasas_instance *instance)
5364 __le32 context;
5365 struct megasas_cmd *cmd;
5366 struct megasas_init_frame *init_frame;
5367 struct megasas_init_queue_info *initq_info;
5368 dma_addr_t init_frame_h;
5369 dma_addr_t initq_info_h;
5372 * Prepare an init frame. Note the init frame points to the queue info
5373 * structure. Each frame has its SGL allocated after the first 64 bytes.
5374 * For this frame - since we don't need any SGL - we use the SGL's space
5375 * as the queue info structure.
5377 * We will not get a NULL command below. We just created the pool.
5379 cmd = megasas_get_cmd(instance);
5381 init_frame = (struct megasas_init_frame *)cmd->frame;
5382 initq_info = (struct megasas_init_queue_info *)
5383 ((unsigned long)init_frame + 64);
5385 init_frame_h = cmd->frame_phys_addr;
5386 initq_info_h = init_frame_h + 64;
5388 context = init_frame->context;
5389 memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5390 memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5391 init_frame->context = context;
5393 initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5394 initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5396 initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5397 initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5399 init_frame->cmd = MFI_CMD_INIT;
5400 init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5401 init_frame->queue_info_new_phys_addr_lo =
5402 cpu_to_le32(lower_32_bits(initq_info_h));
5403 init_frame->queue_info_new_phys_addr_hi =
5404 cpu_to_le32(upper_32_bits(initq_info_h));
5406 init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5409 * disable the intr before firing the init frame to FW
5411 instance->instancet->disable_intr(instance);
5414 * Issue the init frame in polled mode
5417 if (megasas_issue_polled(instance, cmd)) {
5418 dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5419 megasas_return_cmd(instance, cmd);
5420 goto fail_fw_init;
5423 megasas_return_cmd(instance, cmd);
5425 return 0;
5427 fail_fw_init:
5428 return -EINVAL;
5431 static u32
5432 megasas_init_adapter_mfi(struct megasas_instance *instance)
5434 u32 context_sz;
5435 u32 reply_q_sz;
5438 * Get various operational parameters from status register
5440 instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5442 * Reduce the max supported cmds by 1. This is to ensure that the
5443 * reply_q_sz (1 more than the max cmd that driver may send)
5444 * does not exceed max cmds that the FW can support
5446 instance->max_fw_cmds = instance->max_fw_cmds-1;
5447 instance->max_mfi_cmds = instance->max_fw_cmds;
5448 instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5449 0x10;
5451 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5452 * are reserved for IOCTL + driver's internal DCMDs.
5454 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5455 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5456 instance->max_scsi_cmds = (instance->max_fw_cmds -
5457 MEGASAS_SKINNY_INT_CMDS);
5458 sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5459 } else {
5460 instance->max_scsi_cmds = (instance->max_fw_cmds -
5461 MEGASAS_INT_CMDS);
5462 sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5465 instance->cur_can_queue = instance->max_scsi_cmds;
5467 * Create a pool of commands
5469 if (megasas_alloc_cmds(instance))
5470 goto fail_alloc_cmds;
5473 * Allocate memory for reply queue. Length of reply queue should
5474 * be _one_ more than the maximum commands handled by the firmware.
5476 * Note: When FW completes commands, it places the corresponding context
5477 * values in this circular reply queue. This circular queue is a fairly
5478 * typical producer-consumer queue. FW is the producer (of completed
5479 * commands) and the driver is the consumer.
5481 context_sz = sizeof(u32);
5482 reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
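/*
 * Worked example of the sizing above (hypothetical numbers): with
 * max_fw_cmds = 1023 the reply queue is 4 * (1023 + 1) = 4096 bytes -
 * one 32-bit context slot per command plus the extra entry called out
 * in the comment above.
 */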
5484 instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5485 reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5487 if (!instance->reply_queue) {
5488 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5489 goto fail_reply_queue;
5492 if (megasas_issue_init_mfi(instance))
5493 goto fail_fw_init;
5495 if (megasas_get_ctrl_info(instance)) {
5496 dev_err(&instance->pdev->dev, "(%d): Could not get controller info. "
5497 "Fail from %s %d\n", instance->unique_id,
5498 __func__, __LINE__);
5499 goto fail_fw_init;
5502 instance->fw_support_ieee = 0;
5503 instance->fw_support_ieee =
5504 (instance->instancet->read_fw_status_reg(instance) &
5505 0x04000000);
5507 dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
5508 instance->fw_support_ieee);
5510 if (instance->fw_support_ieee)
5511 instance->flag_ieee = 1;
5513 return 0;
5515 fail_fw_init:
5517 dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5518 instance->reply_queue, instance->reply_queue_h);
5519 fail_reply_queue:
5520 megasas_free_cmds(instance);
5522 fail_alloc_cmds:
5523 return 1;
5526 static
5527 void megasas_setup_irq_poll(struct megasas_instance *instance)
5529 struct megasas_irq_context *irq_ctx;
5530 u32 count, i;
5532 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5534 /* Initialize IRQ poll */
5535 for (i = 0; i < count; i++) {
5536 irq_ctx = &instance->irq_context[i];
5537 irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5538 irq_ctx->irq_poll_scheduled = false;
5539 irq_poll_init(&irq_ctx->irqpoll,
5540 instance->threshold_reply_count,
5541 megasas_irqpoll);
5546 * megasas_setup_irqs_ioapic - register legacy interrupts.
5547 * @instance: Adapter soft state
5549 * Do not enable interrupts, only set up the ISRs.
5551 * Return 0 on success.
5553 static int
5554 megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5556 struct pci_dev *pdev;
5558 pdev = instance->pdev;
5559 instance->irq_context[0].instance = instance;
5560 instance->irq_context[0].MSIxIndex = 0;
5561 snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
5562 "megasas", instance->host->host_no);
5563 if (request_irq(pci_irq_vector(pdev, 0),
5564 instance->instancet->service_isr, IRQF_SHARED,
5565 instance->irq_context->name, &instance->irq_context[0])) {
5566 dev_err(&instance->pdev->dev,
5567 "Failed to register IRQ from %s %d\n",
5568 __func__, __LINE__);
5569 return -1;
5571 instance->perf_mode = MR_LATENCY_PERF_MODE;
5572 instance->low_latency_index_start = 0;
5573 return 0;
5577 * megasas_setup_irqs_msix - register MSI-x interrupts.
5578 * @instance: Adapter soft state
5579 * @is_probe: Driver probe check
5581 * Do not enable interrupts, only set up the ISRs.
5583 * Return 0 on success.
5585 static int
5586 megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5588 int i, j;
5589 struct pci_dev *pdev;
5591 pdev = instance->pdev;
5593 /* Try MSI-x */
5594 for (i = 0; i < instance->msix_vectors; i++) {
5595 instance->irq_context[i].instance = instance;
5596 instance->irq_context[i].MSIxIndex = i;
5597 snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
5598 "megasas", instance->host->host_no, i);
5599 if (request_irq(pci_irq_vector(pdev, i),
5600 instance->instancet->service_isr, 0, instance->irq_context[i].name,
5601 &instance->irq_context[i])) {
5602 dev_err(&instance->pdev->dev,
5603 "Failed to register IRQ for vector %d.\n", i);
5604 for (j = 0; j < i; j++)
5605 free_irq(pci_irq_vector(pdev, j),
5606 &instance->irq_context[j]);
5607 /* Retry irq register for IO_APIC */
5608 instance->msix_vectors = 0;
5609 instance->msix_load_balance = false;
5610 if (is_probe) {
5611 pci_free_irq_vectors(instance->pdev);
5612 return megasas_setup_irqs_ioapic(instance);
5613 } else {
5614 return -1;
5619 return 0;
5623 * megasas_destroy_irqs- unregister interrupts.
5624 * @instance: Adapter soft state
5625 * return: void
5627 static void
5628 megasas_destroy_irqs(struct megasas_instance *instance) {
5630 int i;
5631 int count;
5632 struct megasas_irq_context *irq_ctx;
5634 count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5635 if (instance->adapter_type != MFI_SERIES) {
5636 for (i = 0; i < count; i++) {
5637 irq_ctx = &instance->irq_context[i];
5638 irq_poll_disable(&irq_ctx->irqpoll);
5642 if (instance->msix_vectors)
5643 for (i = 0; i < instance->msix_vectors; i++) {
5644 free_irq(pci_irq_vector(instance->pdev, i),
5645 &instance->irq_context[i]);
5647 else
5648 free_irq(pci_irq_vector(instance->pdev, 0),
5649 &instance->irq_context[0]);
5653 * megasas_setup_jbod_map - setup jbod map for FP seq_number.
5654 * @instance: Adapter soft state
5659 void
5660 megasas_setup_jbod_map(struct megasas_instance *instance)
5662 int i;
5663 struct fusion_context *fusion = instance->ctrl_context;
5664 u32 pd_seq_map_sz;
5666 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5667 (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5669 instance->use_seqnum_jbod_fp =
5670 instance->support_seqnum_jbod_fp;
5671 if (reset_devices || !fusion ||
5672 !instance->support_seqnum_jbod_fp) {
5673 dev_info(&instance->pdev->dev,
5674 "JBOD sequence map is disabled %s %d\n",
5675 __func__, __LINE__);
5676 instance->use_seqnum_jbod_fp = false;
5677 return;
5680 if (fusion->pd_seq_sync[0])
5681 goto skip_alloc;
5683 for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5684 fusion->pd_seq_sync[i] = dma_alloc_coherent
5685 (&instance->pdev->dev, pd_seq_map_sz,
5686 &fusion->pd_seq_phys[i], GFP_KERNEL);
5687 if (!fusion->pd_seq_sync[i]) {
5688 dev_err(&instance->pdev->dev,
5689 "Failed to allocate memory from %s %d\n",
5690 __func__, __LINE__);
5691 if (i == 1) {
5692 dma_free_coherent(&instance->pdev->dev,
5693 pd_seq_map_sz, fusion->pd_seq_sync[0],
5694 fusion->pd_seq_phys[0]);
5695 fusion->pd_seq_sync[0] = NULL;
5697 instance->use_seqnum_jbod_fp = false;
5698 return;
5702 skip_alloc:
5703 if (!megasas_sync_pd_seq_num(instance, false) &&
5704 !megasas_sync_pd_seq_num(instance, true))
5705 instance->use_seqnum_jbod_fp = true;
5706 else
5707 instance->use_seqnum_jbod_fp = false;
5710 static void megasas_setup_reply_map(struct megasas_instance *instance)
5712 const struct cpumask *mask;
5713 unsigned int queue, cpu, low_latency_index_start;
5715 low_latency_index_start = instance->low_latency_index_start;
5717 for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5718 mask = pci_irq_get_affinity(instance->pdev, queue);
5719 if (!mask)
5720 goto fallback;
5722 for_each_cpu(cpu, mask)
5723 instance->reply_map[cpu] = queue;
5725 return;
5727 fallback:
5728 queue = low_latency_index_start;
5729 for_each_possible_cpu(cpu) {
5730 instance->reply_map[cpu] = queue;
5731 if (queue == (instance->msix_vectors - 1))
5732 queue = low_latency_index_start;
5733 else
5734 queue++;
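/*
 * Example of the fallback above (hypothetical values): with
 * msix_vectors = 4 and low_latency_index_start = 1, CPUs are assigned
 * round-robin to queues 1, 2, 3, 1, 2, 3, ... so every possible CPU
 * still gets a valid reply queue even without affinity masks.
 */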
5739 * megasas_get_device_list - Get the PD and LD device list from FW.
5740 * @instance: Adapter soft state
5741 * @return: Success or failure
5743 * Issue DCMDs to Firmware to get the PD and LD list.
5744 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5745 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5747 static
5748 int megasas_get_device_list(struct megasas_instance *instance)
5750 memset(instance->pd_list, 0,
5751 (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5752 memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5754 if (instance->enable_fw_dev_list) {
5755 if (megasas_host_device_list_query(instance, true))
5756 return FAILED;
5757 } else {
5758 if (megasas_get_pd_list(instance) < 0) {
5759 dev_err(&instance->pdev->dev, "failed to get PD list\n");
5760 return FAILED;
5763 if (megasas_ld_list_query(instance,
5764 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5765 dev_err(&instance->pdev->dev, "failed to get LD list\n");
5766 return FAILED;
5770 return SUCCESS;
5774 * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
5775 * @instance: Adapter soft state
5776 * return: void
5778 static inline void
5779 megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5781 int i;
5782 int local_numa_node;
5784 if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5785 local_numa_node = dev_to_node(&instance->pdev->dev);
5787 for (i = 0; i < instance->low_latency_index_start; i++)
5788 irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5789 cpumask_of_node(local_numa_node));
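/*
 * In balanced performance mode the first low_latency_index_start vectors
 * are the high IOPS queues; the hint above keeps them on the controller's
 * local NUMA node.
 */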
5793 static int
5794 __megasas_alloc_irq_vectors(struct megasas_instance *instance)
5796 int i, irq_flags;
5797 struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5798 struct irq_affinity *descp = &desc;
5800 irq_flags = PCI_IRQ_MSIX;
5802 if (instance->smp_affinity_enable)
5803 irq_flags |= PCI_IRQ_AFFINITY;
5804 else
5805 descp = NULL;
5807 i = pci_alloc_irq_vectors_affinity(instance->pdev,
5808 instance->low_latency_index_start,
5809 instance->msix_vectors, irq_flags, descp);
5811 return i;
5815 * megasas_alloc_irq_vectors - Allocate IRQ vectors/enable MSI-x vectors
5816 * @instance: Adapter soft state
5817 * return: void
5819 static void
5820 megasas_alloc_irq_vectors(struct megasas_instance *instance)
5822 int i;
5823 unsigned int num_msix_req;
5825 i = __megasas_alloc_irq_vectors(instance);
5827 if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5828 (i != instance->msix_vectors)) {
5829 if (instance->msix_vectors)
5830 pci_free_irq_vectors(instance->pdev);
5831 /* Disable Balanced IOPS mode and try realloc vectors */
5832 instance->perf_mode = MR_LATENCY_PERF_MODE;
5833 instance->low_latency_index_start = 1;
5834 num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5836 instance->msix_vectors = min(num_msix_req,
5837 instance->msix_vectors);
5839 i = __megasas_alloc_irq_vectors(instance);
5843 dev_info(&instance->pdev->dev,
5844 "requested/available msix %d/%d\n", instance->msix_vectors, i);
5846 if (i > 0)
5847 instance->msix_vectors = i;
5848 else
5849 instance->msix_vectors = 0;
5851 if (instance->smp_affinity_enable)
5852 megasas_set_high_iops_queue_affinity_hint(instance);
5856 * megasas_init_fw - Initializes the FW
5857 * @instance: Adapter soft state
5859 * This is the main function for initializing firmware
5862 static int megasas_init_fw(struct megasas_instance *instance)
5864 u32 max_sectors_1;
5865 u32 max_sectors_2, tmp_sectors, msix_enable;
5866 u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5867 resource_size_t base_addr;
5868 void *base_addr_phys;
5869 struct megasas_ctrl_info *ctrl_info = NULL;
5870 unsigned long bar_list;
5871 int i, j, loop;
5872 struct IOV_111 *iovPtr;
5873 struct fusion_context *fusion;
5874 bool intr_coalescing;
5875 unsigned int num_msix_req;
5876 u16 lnksta, speed;
5878 fusion = instance->ctrl_context;
5880 /* Find first memory bar */
5881 bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5882 instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5883 if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5884 "megasas: LSI")) {
5885 dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5886 return -EBUSY;
5889 base_addr = pci_resource_start(instance->pdev, instance->bar);
5890 instance->reg_set = ioremap(base_addr, 8192);
5892 if (!instance->reg_set) {
5893 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5894 goto fail_ioremap;
5897 base_addr_phys = &base_addr;
5898 dev_printk(KERN_DEBUG, &instance->pdev->dev,
5899 "BAR:0x%lx BAR's base_addr(phys):%pa mapped virt_addr:0x%p\n",
5900 instance->bar, base_addr_phys, instance->reg_set);
5902 if (instance->adapter_type != MFI_SERIES)
5903 instance->instancet = &megasas_instance_template_fusion;
5904 else {
5905 switch (instance->pdev->device) {
5906 case PCI_DEVICE_ID_LSI_SAS1078R:
5907 case PCI_DEVICE_ID_LSI_SAS1078DE:
5908 instance->instancet = &megasas_instance_template_ppc;
5909 break;
5910 case PCI_DEVICE_ID_LSI_SAS1078GEN2:
5911 case PCI_DEVICE_ID_LSI_SAS0079GEN2:
5912 instance->instancet = &megasas_instance_template_gen2;
5913 break;
5914 case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
5915 case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
5916 instance->instancet = &megasas_instance_template_skinny;
5917 break;
5918 case PCI_DEVICE_ID_LSI_SAS1064R:
5919 case PCI_DEVICE_ID_DELL_PERC5:
5920 default:
5921 instance->instancet = &megasas_instance_template_xscale;
5922 instance->pd_list_not_supported = 1;
5923 break;
5927 if (megasas_transition_to_ready(instance, 0)) {
5928 dev_info(&instance->pdev->dev,
5929 "Failed to transition controller to ready from %s!\n",
5930 __func__);
5931 if (instance->adapter_type != MFI_SERIES) {
5932 status_reg = instance->instancet->read_fw_status_reg(
5933 instance);
5934 if (status_reg & MFI_RESET_ADAPTER) {
5935 if (megasas_adp_reset_wait_for_ready
5936 (instance, true, 0) == FAILED)
5937 goto fail_ready_state;
5938 } else {
5939 goto fail_ready_state;
5941 } else {
5942 atomic_set(&instance->fw_reset_no_pci_access, 1);
5943 instance->instancet->adp_reset
5944 (instance, instance->reg_set);
5945 atomic_set(&instance->fw_reset_no_pci_access, 0);
5947 /* waiting for about 30 seconds before retry */
5948 ssleep(30);
5950 if (megasas_transition_to_ready(instance, 0))
5951 goto fail_ready_state;
5954 dev_info(&instance->pdev->dev,
5955 "FW restarted successfully from %s!\n",
5956 __func__);
5959 megasas_init_ctrl_params(instance);
5961 if (megasas_set_dma_mask(instance))
5962 goto fail_ready_state;
5964 if (megasas_alloc_ctrl_mem(instance))
5965 goto fail_alloc_dma_buf;
5967 if (megasas_alloc_ctrl_dma_buffers(instance))
5968 goto fail_alloc_dma_buf;
5970 fusion = instance->ctrl_context;
5972 if (instance->adapter_type >= VENTURA_SERIES) {
5973 scratch_pad_2 =
5974 megasas_readl(instance,
5975 &instance->reg_set->outbound_scratch_pad_2);
5976 instance->max_raid_mapsize = ((scratch_pad_2 >>
5977 MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
5978 MR_MAX_RAID_MAP_SIZE_MASK);
5981 instance->enable_sdev_max_qd = enable_sdev_max_qd;
5983 switch (instance->adapter_type) {
5984 case VENTURA_SERIES:
5985 fusion->pcie_bw_limitation = true;
5986 break;
5987 case AERO_SERIES:
5988 fusion->r56_div_offload = true;
5989 break;
5990 default:
5991 break;
5994 /* Check if MSI-X is supported while in ready state */
5995 msix_enable = (instance->instancet->read_fw_status_reg(instance) &
5996 0x4000000) >> 0x1a;
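/* 0x4000000 is bit 26 of the FW status register; shifting right by
 * 0x1a (26) leaves a 0/1 flag indicating MSI-X support. */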
5997 if (msix_enable && !msix_disable) {
5999 scratch_pad_1 = megasas_readl
6000 (instance, &instance->reg_set->outbound_scratch_pad_1);
6001 /* Check max MSI-X vectors */
6002 if (fusion) {
6003 if (instance->adapter_type == THUNDERBOLT_SERIES) {
6004 /* Thunderbolt Series*/
6005 instance->msix_vectors = (scratch_pad_1
6006 & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
6007 } else {
6008 instance->msix_vectors = ((scratch_pad_1
6009 & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
6010 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
6013 * For Invader series, > 8 MSI-x vectors
6014 * supported by FW/HW implies combined
6015 * reply queue mode is enabled.
6016 * For Ventura series, > 16 MSI-x vectors
6017 * supported by FW/HW implies combined
6018 * reply queue mode is enabled.
6020 switch (instance->adapter_type) {
6021 case INVADER_SERIES:
6022 if (instance->msix_vectors > 8)
6023 instance->msix_combined = true;
6024 break;
6025 case AERO_SERIES:
6026 case VENTURA_SERIES:
6027 if (instance->msix_vectors > 16)
6028 instance->msix_combined = true;
6029 break;
6032 if (rdpq_enable)
6033 instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
6034 1 : 0;
6036 if (instance->adapter_type >= INVADER_SERIES &&
6037 !instance->msix_combined) {
6038 instance->msix_load_balance = true;
6039 instance->smp_affinity_enable = false;
6042 /* Save reply post host index addresses 1-15 to local memory
6043 * Index 0 is already saved from reg offset
6044 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6046 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6047 instance->reply_post_host_index_addr[loop] =
6048 (u32 __iomem *)
6049 ((u8 __iomem *)instance->reg_set +
6050 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6051 + (loop * 0x10));
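/* Supplemental reply post host index registers are spaced 0x10 bytes
 * apart, hence the (loop * 0x10) offset above. */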
6055 dev_info(&instance->pdev->dev,
6056 "firmware supports msix\t: (%d)",
6057 instance->msix_vectors);
6058 if (msix_vectors)
6059 instance->msix_vectors = min(msix_vectors,
6060 instance->msix_vectors);
6061 } else /* MFI adapters */
6062 instance->msix_vectors = 1;
6066 * For Aero (if some conditions are met), driver will configure a
6067 * few additional reply queues with interrupt coalescing enabled.
6068 * These queues with interrupt coalescing enabled are called
6069 * High IOPS queues and the rest of the reply queues (based on the number of
6070 * logical CPUs) are termed low latency queues.
6072 * Total Number of reply queues = High IOPS queues + low latency queues
6074 * For rest of fusion adapters, 1 additional reply queue will be
6075 * reserved for management commands, rest of reply queues
6076 * (based on number of logical CPUs) will be used for IOs and
6077 * referenced as IO queues.
6078 * Total Number of reply queues = 1 + IO queues
6080 * MFI adapters support a single MSI-x vector, so a single reply queue
6081 * will be used for IO and management commands.
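/*
 * Worked example (hypothetical numbers): with 8 high IOPS queues and
 * 64 online CPUs, num_msix_req below becomes 64 + 8 = 72 and is then
 * capped at the vector count advertised by firmware.
 */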
6084 intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6085 true : false;
6086 if (intr_coalescing &&
6087 (num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6088 (instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6089 instance->perf_mode = MR_BALANCED_PERF_MODE;
6090 else
6091 instance->perf_mode = MR_LATENCY_PERF_MODE;
6094 if (instance->adapter_type == AERO_SERIES) {
6095 pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6096 speed = lnksta & PCI_EXP_LNKSTA_CLS;
6099 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6100 * in latency perf mode and enable R1 PCI bandwidth algorithm
6102 if (speed < 0x4) {
6103 instance->perf_mode = MR_LATENCY_PERF_MODE;
6104 fusion->pcie_bw_limitation = true;
6108 * Performance mode settings provided through the module parameter perf_mode will
6109 * take effect only for:
6110 * 1. Aero family of adapters.
6111 * 2. When the user sets the module parameter perf_mode in the range 0-2.
6113 if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6114 (perf_mode <= MR_LATENCY_PERF_MODE))
6115 instance->perf_mode = perf_mode;
6117 * If intr coalescing is not supported by controller FW, then IOPS
6118 * and Balanced modes are not feasible.
6120 if (!intr_coalescing)
6121 instance->perf_mode = MR_LATENCY_PERF_MODE;
6125 if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6126 instance->low_latency_index_start =
6127 MR_HIGH_IOPS_QUEUE_COUNT;
6128 else
6129 instance->low_latency_index_start = 1;
6131 num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6133 instance->msix_vectors = min(num_msix_req,
6134 instance->msix_vectors);
6136 megasas_alloc_irq_vectors(instance);
6137 if (!instance->msix_vectors)
6138 instance->msix_load_balance = false;
6141 * MSI-X host index 0 is common for all adapters.
6142 * It is used for all MPT based Adapters.
6144 if (instance->msix_combined) {
6145 instance->reply_post_host_index_addr[0] =
6146 (u32 *)((u8 *)instance->reg_set +
6147 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6148 } else {
6149 instance->reply_post_host_index_addr[0] =
6150 (u32 *)((u8 *)instance->reg_set +
6151 MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6154 if (!instance->msix_vectors) {
6155 i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6156 if (i < 0)
6157 goto fail_init_adapter;
6160 megasas_setup_reply_map(instance);
6162 dev_info(&instance->pdev->dev,
6163 "current msix/online cpus\t: (%d/%d)\n",
6164 instance->msix_vectors, (unsigned int)num_online_cpus());
6165 dev_info(&instance->pdev->dev,
6166 "RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6168 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6169 (unsigned long)instance);
6172 * Below are the default values for legacy firmware
6173 * (non-fusion based controllers).
6175 instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6176 instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6177 /* Get operational params, sge flags, send init cmd to controller */
6178 if (instance->instancet->init_adapter(instance))
6179 goto fail_init_adapter;
6181 if (instance->adapter_type >= VENTURA_SERIES) {
6182 scratch_pad_3 =
6183 megasas_readl(instance,
6184 &instance->reg_set->outbound_scratch_pad_3);
6185 if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6186 MR_DEFAULT_NVME_PAGE_SHIFT)
6187 instance->nvme_page_size =
6188 (1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
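/* For example, a page-size field of 12 yields 1 << 12 = 4096 byte
 * NVMe pages. */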
6190 dev_info(&instance->pdev->dev,
6191 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6194 if (instance->msix_vectors ?
6195 megasas_setup_irqs_msix(instance, 1) :
6196 megasas_setup_irqs_ioapic(instance))
6197 goto fail_init_adapter;
6199 if (instance->adapter_type != MFI_SERIES)
6200 megasas_setup_irq_poll(instance);
6202 instance->instancet->enable_intr(instance);
6204 dev_info(&instance->pdev->dev, "INIT adapter done\n");
6206 megasas_setup_jbod_map(instance);
6208 if (megasas_get_device_list(instance) != SUCCESS) {
6209 dev_err(&instance->pdev->dev,
6210 "%s: megasas_get_device_list failed\n",
6211 __func__);
6212 goto fail_get_ld_pd_list;
6215 /* stream detection initialization */
6216 if (instance->adapter_type >= VENTURA_SERIES) {
6217 fusion->stream_detect_by_ld =
6218 kcalloc(MAX_LOGICAL_DRIVES_EXT,
6219 sizeof(struct LD_STREAM_DETECT *),
6220 GFP_KERNEL);
6221 if (!fusion->stream_detect_by_ld) {
6222 dev_err(&instance->pdev->dev,
6223 "unable to allocate stream detection for pool of LDs\n");
6224 goto fail_get_ld_pd_list;
6226 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6227 fusion->stream_detect_by_ld[i] =
6228 kzalloc(sizeof(struct LD_STREAM_DETECT),
6229 GFP_KERNEL);
6230 if (!fusion->stream_detect_by_ld[i]) {
6231 dev_err(&instance->pdev->dev,
6232 "unable to allocate stream detect by LD\n");
6233 for (j = 0; j < i; ++j)
6234 kfree(fusion->stream_detect_by_ld[j]);
6235 kfree(fusion->stream_detect_by_ld);
6236 fusion->stream_detect_by_ld = NULL;
6237 goto fail_get_ld_pd_list;
6239 fusion->stream_detect_by_ld[i]->mru_bit_map
6240 = MR_STREAM_BITMAP;
6245 * Compute the max allowed sectors per IO: The controller info has two
6246 * limits on max sectors. Driver should use the minimum of these two.
6248 * 1 << stripe_sz_ops.min = max sectors per strip
6250 * Note that older firmware (< FW ver 30) didn't report the information
6251 * needed to calculate max_sectors_1, so the number always ended up as zero.
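/*
 * Worked example (hypothetical values): stripe_sz_ops.min = 7 gives
 * 1 << 7 = 128 sectors per strip; with max_strips_per_io = 42 that
 * makes max_sectors_1 = 5376, which is then compared against
 * max_request_size below.
 */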
6253 tmp_sectors = 0;
6254 ctrl_info = instance->ctrl_info_buf;
6256 max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6257 le16_to_cpu(ctrl_info->max_strips_per_io);
6258 max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6260 tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6262 instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6263 instance->passive = ctrl_info->cluster.passive;
6264 memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6265 instance->UnevenSpanSupport =
6266 ctrl_info->adapterOperations2.supportUnevenSpans;
6267 if (instance->UnevenSpanSupport) {
6268 struct fusion_context *fusion = instance->ctrl_context;
6269 if (MR_ValidateMapInfo(instance, instance->map_id))
6270 fusion->fast_path_io = 1;
6271 else
6272 fusion->fast_path_io = 0;
6275 if (ctrl_info->host_interface.SRIOV) {
6276 instance->requestorId = ctrl_info->iov.requestorId;
6277 if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6278 if (!ctrl_info->adapterOperations2.activePassive)
6279 instance->PlasmaFW111 = 1;
6281 dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6282 instance->PlasmaFW111 ? "1.11" : "new");
6284 if (instance->PlasmaFW111) {
6285 iovPtr = (struct IOV_111 *)
6286 ((unsigned char *)ctrl_info + IOV_111_OFFSET);
6287 instance->requestorId = iovPtr->requestorId;
6290 dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6291 instance->requestorId);
6294 instance->crash_dump_fw_support =
6295 ctrl_info->adapterOperations3.supportCrashDump;
6296 instance->crash_dump_drv_support =
6297 (instance->crash_dump_fw_support &&
6298 instance->crash_dump_buf);
6299 if (instance->crash_dump_drv_support)
6300 megasas_set_crash_dump_params(instance,
6301 MR_CRASH_BUF_TURN_OFF);
6303 else {
6304 if (instance->crash_dump_buf)
6305 dma_free_coherent(&instance->pdev->dev,
6306 CRASH_DMA_BUF_SIZE,
6307 instance->crash_dump_buf,
6308 instance->crash_dump_h);
6309 instance->crash_dump_buf = NULL;
6312 if (instance->snapdump_wait_time) {
6313 megasas_get_snapdump_properties(instance);
6314 dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6315 instance->snapdump_wait_time);
6318 dev_info(&instance->pdev->dev,
6319 "pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6320 le16_to_cpu(ctrl_info->pci.vendor_id),
6321 le16_to_cpu(ctrl_info->pci.device_id),
6322 le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6323 le16_to_cpu(ctrl_info->pci.sub_device_id));
6324 dev_info(&instance->pdev->dev, "unevenspan support : %s\n",
6325 instance->UnevenSpanSupport ? "yes" : "no");
6326 dev_info(&instance->pdev->dev, "firmware crash dump : %s\n",
6327 instance->crash_dump_drv_support ? "yes" : "no");
6328 dev_info(&instance->pdev->dev, "JBOD sequence map : %s\n",
6329 instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6331 instance->max_sectors_per_req = instance->max_num_sge *
6332 SGE_BUFFER_SIZE / 512;
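/* For example, assuming 64 SGEs of 4 KiB each, this allows
 * 64 * 4096 / 512 = 512 sectors per request before the firmware
 * limit above is applied. */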
6333 if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6334 instance->max_sectors_per_req = tmp_sectors;
6336 /* Check for valid throttlequeuedepth module parameter */
6337 if (throttlequeuedepth &&
6338 throttlequeuedepth <= instance->max_scsi_cmds)
6339 instance->throttlequeuedepth = throttlequeuedepth;
6340 else
6341 instance->throttlequeuedepth =
6342 MEGASAS_THROTTLE_QUEUE_DEPTH;
6344 if ((resetwaittime < 1) ||
6345 (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6346 resetwaittime = MEGASAS_RESET_WAIT_TIME;
6348 if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6349 scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6351 /* Launch SR-IOV heartbeat timer */
6352 if (instance->requestorId) {
6353 if (!megasas_sriov_start_heartbeat(instance, 1)) {
6354 megasas_start_timer(instance);
6355 } else {
6356 instance->skip_heartbeat_timer_del = 1;
6357 goto fail_get_ld_pd_list;
6362 * Create and start watchdog thread which will monitor
6363 * controller state every 1 sec and trigger OCR when
6364 * it enters fault state
6366 if (instance->adapter_type != MFI_SERIES)
6367 if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6368 goto fail_start_watchdog;
6370 return 0;
6372 fail_start_watchdog:
6373 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6374 del_timer_sync(&instance->sriov_heartbeat_timer);
6375 fail_get_ld_pd_list:
6376 instance->instancet->disable_intr(instance);
6377 megasas_destroy_irqs(instance);
6378 fail_init_adapter:
6379 if (instance->msix_vectors)
6380 pci_free_irq_vectors(instance->pdev);
6381 instance->msix_vectors = 0;
6382 fail_alloc_dma_buf:
6383 megasas_free_ctrl_dma_buffers(instance);
6384 megasas_free_ctrl_mem(instance);
6385 fail_ready_state:
6386 iounmap(instance->reg_set);
6388 fail_ioremap:
6389 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6391 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6392 __func__, __LINE__);
6393 return -EINVAL;
6397 * megasas_release_mfi - Reverses the FW initialization
6398 * @instance: Adapter soft state
6400 static void megasas_release_mfi(struct megasas_instance *instance)
6402 u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
6404 if (instance->reply_queue)
6405 dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6406 instance->reply_queue, instance->reply_queue_h);
6408 megasas_free_cmds(instance);
6410 iounmap(instance->reg_set);
6412 pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6416 * megasas_get_seq_num - Gets latest event sequence numbers
6417 * @instance: Adapter soft state
6418 * @eli: FW event log sequence numbers information
6420 * FW maintains a log of all events in a non-volatile area. Upper layers would
6421 * usually find out the latest sequence number of the events, the seq number at
6422 * boot, etc. They would "read" all the events below the latest seq number
6423 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
6424 * number), they would subscribe to AEN (asynchronous event notification) and
6425 * wait for the events to happen.
6427 static int
6428 megasas_get_seq_num(struct megasas_instance *instance,
6429 struct megasas_evt_log_info *eli)
6431 struct megasas_cmd *cmd;
6432 struct megasas_dcmd_frame *dcmd;
6433 struct megasas_evt_log_info *el_info;
6434 dma_addr_t el_info_h = 0;
6435 int ret;
6437 cmd = megasas_get_cmd(instance);
6439 if (!cmd) {
6440 return -ENOMEM;
6443 dcmd = &cmd->frame->dcmd;
6444 el_info = dma_alloc_coherent(&instance->pdev->dev,
6445 sizeof(struct megasas_evt_log_info),
6446 &el_info_h, GFP_KERNEL);
6447 if (!el_info) {
6448 megasas_return_cmd(instance, cmd);
6449 return -ENOMEM;
6452 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6454 dcmd->cmd = MFI_CMD_DCMD;
6455 dcmd->cmd_status = 0x0;
6456 dcmd->sge_count = 1;
6457 dcmd->flags = MFI_FRAME_DIR_READ;
6458 dcmd->timeout = 0;
6459 dcmd->pad_0 = 0;
6460 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6461 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6463 megasas_set_dma_settings(instance, dcmd, el_info_h,
6464 sizeof(struct megasas_evt_log_info));
6466 ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6467 if (ret != DCMD_SUCCESS) {
6468 dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6469 __func__, __LINE__);
6470 goto dcmd_failed;
6474 * Copy the data back into the caller's buffer
6476 eli->newest_seq_num = el_info->newest_seq_num;
6477 eli->oldest_seq_num = el_info->oldest_seq_num;
6478 eli->clear_seq_num = el_info->clear_seq_num;
6479 eli->shutdown_seq_num = el_info->shutdown_seq_num;
6480 eli->boot_seq_num = el_info->boot_seq_num;
6482 dcmd_failed:
6483 dma_free_coherent(&instance->pdev->dev,
6484 sizeof(struct megasas_evt_log_info),
6485 el_info, el_info_h);
6487 megasas_return_cmd(instance, cmd);
6489 return ret;
6493 * megasas_register_aen - Registers for asynchronous event notification
6494 * @instance: Adapter soft state
6495 * @seq_num: The starting sequence number
6496 * @class_locale: Class of the event
6498 * This function subscribes for AEN for events beyond the @seq_num. It requests
6499 * to be notified if and only if the event is of type @class_locale
6501 static int
6502 megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6503 u32 class_locale_word)
6505 int ret_val;
6506 struct megasas_cmd *cmd;
6507 struct megasas_dcmd_frame *dcmd;
6508 union megasas_evt_class_locale curr_aen;
6509 union megasas_evt_class_locale prev_aen;
6512 * If there is an AEN pending already (aen_cmd), check if the
6513 * class_locale of that pending AEN is inclusive of the new
6514 * AEN request we currently have. If it is, then we don't have
6515 * to do anything. In other words, whichever events the current
6516 * AEN request is subscribing to, have already been subscribed
6517 * to.
6519 * If the old_cmd is _not_ inclusive, then we have to abort
6520 * that command, form a class_locale that is superset of both
6521 * old and current and re-issue to the FW
6524 curr_aen.word = class_locale_word;
6526 if (instance->aen_cmd) {
6528 prev_aen.word =
6529 le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6531 if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6532 (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6533 dev_info(&instance->pdev->dev,
6534 "%s %d out of range class %d sent by application\n",
6535 __func__, __LINE__, curr_aen.members.class);
6536 return 0;
6540 * A class whose enum value is smaller is inclusive of all
6541 * higher values. If a PROGRESS (= -1) was previously
6542 * registered, then new registration requests for higher
6543 * classes need not be sent to FW. They are automatically
6544 * included.
6546 * Locale numbers don't have such hierarchy. They are bitmap
6547 * values
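/*
 * Example: a previously registered PROGRESS class with LOCALE_ALL
 * already covers a new CRITICAL/LOCALE_LD request (the lower class
 * value is inclusive and the locale bits are a subset), so no new
 * registration would be issued.
 */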
6549 if ((prev_aen.members.class <= curr_aen.members.class) &&
6550 !((prev_aen.members.locale & curr_aen.members.locale) ^
6551 curr_aen.members.locale)) {
6553 * Previously issued event registration includes
6554 * current request. Nothing to do.
6556 return 0;
6557 } else {
6558 curr_aen.members.locale |= prev_aen.members.locale;
6560 if (prev_aen.members.class < curr_aen.members.class)
6561 curr_aen.members.class = prev_aen.members.class;
6563 instance->aen_cmd->abort_aen = 1;
6564 ret_val = megasas_issue_blocked_abort_cmd(instance,
6565 instance->
6566 aen_cmd, 30);
6568 if (ret_val) {
6569 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6570 "previous AEN command\n");
6571 return ret_val;
6576 cmd = megasas_get_cmd(instance);
6578 if (!cmd)
6579 return -ENOMEM;
6581 dcmd = &cmd->frame->dcmd;
6583 memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6586 * Prepare DCMD for aen registration
6588 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6590 dcmd->cmd = MFI_CMD_DCMD;
6591 dcmd->cmd_status = 0x0;
6592 dcmd->sge_count = 1;
6593 dcmd->flags = MFI_FRAME_DIR_READ;
6594 dcmd->timeout = 0;
6595 dcmd->pad_0 = 0;
6596 dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6597 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6598 dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6599 instance->last_seq_num = seq_num;
6600 dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6602 megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6603 sizeof(struct megasas_evt_detail));
6605 if (instance->aen_cmd != NULL) {
6606 megasas_return_cmd(instance, cmd);
6607 return 0;
6611 * Store reference to the cmd used to register for AEN. When an
6612 * application wants us to register for AEN, we have to abort this
6613 * cmd and re-register with a new EVENT LOCALE supplied by that app
6615 instance->aen_cmd = cmd;
6618 * Issue the aen registration frame
6620 instance->instancet->issue_dcmd(instance, cmd);
6622 return 0;
6625 /* megasas_get_target_prop - Send DCMD with below details to firmware.
6627 * This DCMD will fetch a few properties of the LD/system PD defined
6628 * in MR_TARGET_DEV_PROPERTIES, e.g. Queue Depth, MDTS value.
6630 * This DCMD is sent by the driver whenever a new target is added to the OS.
6632 * dcmd.opcode - MR_DCMD_DEV_GET_TARGET_PROP
6633 * dcmd.mbox.b[0] - DCMD is to be fired for LD or system PD.
6634 * 0 = system PD, 1 = LD.
6635 * dcmd.mbox.s[1] - TargetID for LD/system PD.
6636 * dcmd.sge IN - Pointer to return MR_TARGET_DEV_PROPERTIES.
6638 * @instance: Adapter soft state
6639 * @sdev: OS provided scsi device
6641 * Returns 0 on success, non-zero on failure.
6644 megasas_get_target_prop(struct megasas_instance *instance,
6645 struct scsi_device *sdev)
6647 int ret;
6648 struct megasas_cmd *cmd;
6649 struct megasas_dcmd_frame *dcmd;
6650 u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6651 sdev->id;
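/* Channels 0-1 hold system PDs and 2-3 hold LDs; (channel % 2) folds
 * the two channels of each type into a single target id space. */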
6653 cmd = megasas_get_cmd(instance);
6655 if (!cmd) {
6656 dev_err(&instance->pdev->dev,
6657 "Failed to get cmd %s\n", __func__);
6658 return -ENOMEM;
6661 dcmd = &cmd->frame->dcmd;
6663 memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6664 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6665 dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6667 dcmd->mbox.s[1] = cpu_to_le16(targetId);
6668 dcmd->cmd = MFI_CMD_DCMD;
6669 dcmd->cmd_status = 0xFF;
6670 dcmd->sge_count = 1;
6671 dcmd->flags = MFI_FRAME_DIR_READ;
6672 dcmd->timeout = 0;
6673 dcmd->pad_0 = 0;
6674 dcmd->data_xfer_len =
6675 cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6676 dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6678 megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6679 sizeof(struct MR_TARGET_PROPERTIES));
6681 if ((instance->adapter_type != MFI_SERIES) &&
6682 !instance->mask_interrupts)
6683 ret = megasas_issue_blocked_cmd(instance,
6684 cmd, MFI_IO_TIMEOUT_SECS);
6685 else
6686 ret = megasas_issue_polled(instance, cmd);
6688 switch (ret) {
6689 case DCMD_TIMEOUT:
6690 switch (dcmd_timeout_ocr_possible(instance)) {
6691 case INITIATE_OCR:
6692 cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6693 mutex_unlock(&instance->reset_mutex);
6694 megasas_reset_fusion(instance->host,
6695 MFI_IO_TIMEOUT_OCR);
6696 mutex_lock(&instance->reset_mutex);
6697 break;
6698 case KILL_ADAPTER:
6699 megaraid_sas_kill_hba(instance);
6700 break;
6701 case IGNORE_TIMEOUT:
6702 dev_info(&instance->pdev->dev,
6703 "Ignore DCMD timeout: %s %d\n",
6704 __func__, __LINE__);
6705 break;
6707 break;
6709 default:
6710 megasas_return_cmd(instance, cmd);
6712 if (ret != DCMD_SUCCESS)
6713 dev_err(&instance->pdev->dev,
6714 "return from %s %d return value %d\n",
6715 __func__, __LINE__, ret);
6717 return ret;
6721 * megasas_start_aen - Subscribes to AEN during driver load time
6722 * @instance: Adapter soft state
6724 static int megasas_start_aen(struct megasas_instance *instance)
6726 struct megasas_evt_log_info eli;
6727 union megasas_evt_class_locale class_locale;
6730 * Get the latest sequence number from FW
6732 memset(&eli, 0, sizeof(eli));
6734 if (megasas_get_seq_num(instance, &eli))
6735 return -1;
6738 * Register AEN with FW for latest sequence number plus 1
6740 class_locale.members.reserved = 0;
6741 class_locale.members.locale = MR_EVT_LOCALE_ALL;
6742 class_locale.members.class = MR_EVT_CLASS_DEBUG;
6744 return megasas_register_aen(instance,
6745 le32_to_cpu(eli.newest_seq_num) + 1,
6746 class_locale.word);
6750 * megasas_io_attach - Attaches this driver to SCSI mid-layer
6751 * @instance: Adapter soft state
6753 static int megasas_io_attach(struct megasas_instance *instance)
6755 struct Scsi_Host *host = instance->host;
6758 * Export parameters required by SCSI mid-layer
6760 host->unique_id = instance->unique_id;
6761 host->can_queue = instance->max_scsi_cmds;
6762 host->this_id = instance->init_id;
6763 host->sg_tablesize = instance->max_num_sge;
6765 if (instance->fw_support_ieee)
6766 instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6769 * Check if the module parameter value for max_sectors can be used
6771 if (max_sectors && max_sectors < instance->max_sectors_per_req)
6772 instance->max_sectors_per_req = max_sectors;
6773 else {
6774 if (max_sectors) {
6775 if (((instance->pdev->device ==
6776 PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6777 (instance->pdev->device ==
6778 PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6779 (max_sectors <= MEGASAS_MAX_SECTORS)) {
6780 instance->max_sectors_per_req = max_sectors;
6781 } else {
6782 dev_info(&instance->pdev->dev, "max_sectors should be > 0 "
6783 "and <= %d (or < 1MB for GEN2 controller)\n",
6784 instance->max_sectors_per_req);
6789 host->max_sectors = instance->max_sectors_per_req;
6790 host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6791 host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6792 host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6793 host->max_lun = MEGASAS_MAX_LUN;
6794 host->max_cmd_len = 16;
6797 * Notify the mid-layer about the new controller
6799 if (scsi_add_host(host, &instance->pdev->dev)) {
6800 dev_err(&instance->pdev->dev,
6801 "Failed to add host from %s %d\n",
6802 __func__, __LINE__);
6803 return -ENODEV;
6806 return 0;
6810 * megasas_set_dma_mask - Set DMA mask for supported controllers
6812 * @instance: Adapter soft state
6813 * Description:
6815 * For Ventura, driver/FW will operate in 63 bit DMA addresses.
6817 * For Invader -
6818 * By default, driver/FW will operate in 32 bit DMA addresses
6819 * for consistent DMA mapping, but if the 32 bit consistent
6820 * DMA mask fails, the driver will try a 63 bit consistent
6821 * mask, provided the FW is truly 63 bit DMA capable.
6823 * For older controllers (Thunderbolt and MFI based adapters) -
6824 * driver/FW will operate in 32 bit consistent DMA addresses.
6826 static int
6827 megasas_set_dma_mask(struct megasas_instance *instance)
6829 u64 consistent_mask;
6830 struct pci_dev *pdev;
6831 u32 scratch_pad_1;
6833 pdev = instance->pdev;
6834 consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6835 DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6837 if (IS_DMA64) {
6838 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6839 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6840 goto fail_set_dma_mask;
6842 if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6843 (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6844 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6846 * If the 32 bit coherent DMA mask also fails, then try a 63 bit mask,
6847 * provided the FW is capable of handling 64 bit DMA.
6849 scratch_pad_1 = megasas_readl
6850 (instance, &instance->reg_set->outbound_scratch_pad_1);
6852 if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6853 goto fail_set_dma_mask;
6854 else if (dma_set_mask_and_coherent(&pdev->dev,
6855 DMA_BIT_MASK(63)))
6856 goto fail_set_dma_mask;
6858 } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6859 goto fail_set_dma_mask;
6861 if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6862 instance->consistent_mask_64bit = false;
6863 else
6864 instance->consistent_mask_64bit = true;
6866 dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6867 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6868 (instance->consistent_mask_64bit ? "63" : "32"));
6870 return 0;
6872 fail_set_dma_mask:
6873 dev_err(&pdev->dev, "Failed to set DMA mask\n");
6874 return -1;
6879 * megasas_set_adapter_type - Set adapter type.
6880 * Supported controllers can be divided into
6881 * different categories-
6882 * enum MR_ADAPTER_TYPE {
6883 * MFI_SERIES = 1,
6884 * THUNDERBOLT_SERIES = 2,
6885 * INVADER_SERIES = 3,
6886 * VENTURA_SERIES = 4,
6887 * AERO_SERIES = 5,
6888 * };
6889 * @instance: Adapter soft state
6890 * return: void
6892 static inline void megasas_set_adapter_type(struct megasas_instance *instance)
6894 if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
6895 (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
6896 instance->adapter_type = MFI_SERIES;
6897 } else {
6898 switch (instance->pdev->device) {
6899 case PCI_DEVICE_ID_LSI_AERO_10E1:
6900 case PCI_DEVICE_ID_LSI_AERO_10E2:
6901 case PCI_DEVICE_ID_LSI_AERO_10E5:
6902 case PCI_DEVICE_ID_LSI_AERO_10E6:
6903 instance->adapter_type = AERO_SERIES;
6904 break;
6905 case PCI_DEVICE_ID_LSI_VENTURA:
6906 case PCI_DEVICE_ID_LSI_CRUSADER:
6907 case PCI_DEVICE_ID_LSI_HARPOON:
6908 case PCI_DEVICE_ID_LSI_TOMCAT:
6909 case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
6910 case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
6911 instance->adapter_type = VENTURA_SERIES;
6912 break;
6913 case PCI_DEVICE_ID_LSI_FUSION:
6914 case PCI_DEVICE_ID_LSI_PLASMA:
6915 instance->adapter_type = THUNDERBOLT_SERIES;
6916 break;
6917 case PCI_DEVICE_ID_LSI_INVADER:
6918 case PCI_DEVICE_ID_LSI_INTRUDER:
6919 case PCI_DEVICE_ID_LSI_INTRUDER_24:
6920 case PCI_DEVICE_ID_LSI_CUTLASS_52:
6921 case PCI_DEVICE_ID_LSI_CUTLASS_53:
6922 case PCI_DEVICE_ID_LSI_FURY:
6923 instance->adapter_type = INVADER_SERIES;
6924 break;
6925 default: /* For all other supported controllers */
6926 instance->adapter_type = MFI_SERIES;
6927 break;
6932 static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
6934 instance->producer = dma_alloc_coherent(&instance->pdev->dev,
6935 sizeof(u32), &instance->producer_h, GFP_KERNEL);
6936 instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
6937 sizeof(u32), &instance->consumer_h, GFP_KERNEL);
6939 if (!instance->producer || !instance->consumer) {
6940 dev_err(&instance->pdev->dev,
6941 "Failed to allocate memory for producer, consumer\n");
6942 return -1;
6945 *instance->producer = 0;
6946 *instance->consumer = 0;
6947 return 0;
6951 * megasas_alloc_ctrl_mem - Allocate per controller memory for core data
6952 * structures which are not common across MFI
6953 * adapters and fusion adapters.
6954 * For MFI based adapters, allocate producer and
6955 * consumer buffers. For fusion adapters, allocate
6956 * memory for fusion context.
6957 * @instance: Adapter soft state
6958 * return: 0 for SUCCESS
6960 static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
6962 instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
6963 GFP_KERNEL);
6964 if (!instance->reply_map)
6965 return -ENOMEM;
6967 switch (instance->adapter_type) {
6968 case MFI_SERIES:
6969 if (megasas_alloc_mfi_ctrl_mem(instance))
6970 goto fail;
6971 break;
6972 case AERO_SERIES:
6973 case VENTURA_SERIES:
6974 case THUNDERBOLT_SERIES:
6975 case INVADER_SERIES:
6976 if (megasas_alloc_fusion_context(instance))
6977 goto fail;
6978 break;
6981 return 0;
6982 fail:
6983 kfree(instance->reply_map);
6984 instance->reply_map = NULL;
6985 return -ENOMEM;
6989 * megasas_free_ctrl_mem - Free fusion context for fusion adapters and
6990 * producer, consumer buffers for MFI adapters
6992 * @instance - Adapter soft instance
6995 static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
6997 kfree(instance->reply_map);
6998 if (instance->adapter_type == MFI_SERIES) {
6999 if (instance->producer)
7000 dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7001 instance->producer,
7002 instance->producer_h);
7003 if (instance->consumer)
7004 dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7005 instance->consumer,
7006 instance->consumer_h);
7007 } else {
7008 megasas_free_fusion_context(instance);
7013 * megasas_alloc_ctrl_dma_buffers - Allocate consistent DMA buffers during
7014 * driver load time
7016 * @instance- Adapter soft instance
7017 * @return- 0 for SUCCESS
7019 static inline
7020 int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
7022 struct pci_dev *pdev = instance->pdev;
7023 struct fusion_context *fusion = instance->ctrl_context;
7025 instance->evt_detail = dma_alloc_coherent(&pdev->dev,
7026 sizeof(struct megasas_evt_detail),
7027 &instance->evt_detail_h, GFP_KERNEL);
7029 if (!instance->evt_detail) {
7030 dev_err(&instance->pdev->dev,
7031 "Failed to allocate event detail buffer\n");
7032 return -ENOMEM;
7035 if (fusion) {
7036 fusion->ioc_init_request =
7037 dma_alloc_coherent(&pdev->dev,
7038 sizeof(struct MPI2_IOC_INIT_REQUEST),
7039 &fusion->ioc_init_request_phys,
7040 GFP_KERNEL);
7042 if (!fusion->ioc_init_request) {
7043 dev_err(&pdev->dev,
7044 "Failed to allocate ioc init request buffer\n");
7045 return -ENOMEM;
7048 instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
7049 sizeof(struct MR_SNAPDUMP_PROPERTIES),
7050 &instance->snapdump_prop_h, GFP_KERNEL);
7052 if (!instance->snapdump_prop)
7053 dev_err(&pdev->dev,
7054 "Failed to allocate snapdump properties buffer\n");
7056 instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
7057 HOST_DEVICE_LIST_SZ,
7058 &instance->host_device_list_buf_h,
7059 GFP_KERNEL);
7061 if (!instance->host_device_list_buf) {
7062 dev_err(&pdev->dev,
7063 "Failed to allocate targetid list buffer\n");
7064 return -ENOMEM;
7069 instance->pd_list_buf =
7070 dma_alloc_coherent(&pdev->dev,
7071 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7072 &instance->pd_list_buf_h, GFP_KERNEL);
7074 if (!instance->pd_list_buf) {
7075 dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
7076 return -ENOMEM;
7079 instance->ctrl_info_buf =
7080 dma_alloc_coherent(&pdev->dev,
7081 sizeof(struct megasas_ctrl_info),
7082 &instance->ctrl_info_buf_h, GFP_KERNEL);
7084 if (!instance->ctrl_info_buf) {
7085 dev_err(&pdev->dev,
7086 "Failed to allocate controller info buffer\n");
7087 return -ENOMEM;
7090 instance->ld_list_buf =
7091 dma_alloc_coherent(&pdev->dev,
7092 sizeof(struct MR_LD_LIST),
7093 &instance->ld_list_buf_h, GFP_KERNEL);
7095 if (!instance->ld_list_buf) {
7096 dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7097 return -ENOMEM;
7100 instance->ld_targetid_list_buf =
7101 dma_alloc_coherent(&pdev->dev,
7102 sizeof(struct MR_LD_TARGETID_LIST),
7103 &instance->ld_targetid_list_buf_h, GFP_KERNEL);
7105 if (!instance->ld_targetid_list_buf) {
7106 dev_err(&pdev->dev,
7107 "Failed to allocate LD targetid list buffer\n");
7108 return -ENOMEM;
7111 if (!reset_devices) {
7112 instance->system_info_buf =
7113 dma_alloc_coherent(&pdev->dev,
7114 sizeof(struct MR_DRV_SYSTEM_INFO),
7115 &instance->system_info_h, GFP_KERNEL);
7116 instance->pd_info =
7117 dma_alloc_coherent(&pdev->dev,
7118 sizeof(struct MR_PD_INFO),
7119 &instance->pd_info_h, GFP_KERNEL);
7120 instance->tgt_prop =
7121 dma_alloc_coherent(&pdev->dev,
7122 sizeof(struct MR_TARGET_PROPERTIES),
7123 &instance->tgt_prop_h, GFP_KERNEL);
7124 instance->crash_dump_buf =
7125 dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7126 &instance->crash_dump_h, GFP_KERNEL);
7128 if (!instance->system_info_buf)
7129 dev_err(&instance->pdev->dev,
7130 "Failed to allocate system info buffer\n");
7132 if (!instance->pd_info)
7133 dev_err(&instance->pdev->dev,
7134 "Failed to allocate pd_info buffer\n");
7136 if (!instance->tgt_prop)
7137 dev_err(&instance->pdev->dev,
7138 "Failed to allocate tgt_prop buffer\n");
7140 if (!instance->crash_dump_buf)
7141 dev_err(&instance->pdev->dev,
7142 "Failed to allocate crash dump buffer\n");
7145 return 0;
7149 * megasas_free_ctrl_dma_buffers - Free consistent DMA buffers allocated
7150 * during driver load time
7152 * @instance- Adapter soft instance
7155 static inline
7156 void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7158 struct pci_dev *pdev = instance->pdev;
7159 struct fusion_context *fusion = instance->ctrl_context;
7161 if (instance->evt_detail)
7162 dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7163 instance->evt_detail,
7164 instance->evt_detail_h);
7166 if (fusion && fusion->ioc_init_request)
7167 dma_free_coherent(&pdev->dev,
7168 sizeof(struct MPI2_IOC_INIT_REQUEST),
7169 fusion->ioc_init_request,
7170 fusion->ioc_init_request_phys);
7172 if (instance->pd_list_buf)
7173 dma_free_coherent(&pdev->dev,
7174 MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7175 instance->pd_list_buf,
7176 instance->pd_list_buf_h);
7178 if (instance->ld_list_buf)
7179 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7180 instance->ld_list_buf,
7181 instance->ld_list_buf_h);
7183 if (instance->ld_targetid_list_buf)
7184 dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7185 instance->ld_targetid_list_buf,
7186 instance->ld_targetid_list_buf_h);
7188 if (instance->ctrl_info_buf)
7189 dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7190 instance->ctrl_info_buf,
7191 instance->ctrl_info_buf_h);
7193 if (instance->system_info_buf)
7194 dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7195 instance->system_info_buf,
7196 instance->system_info_h);
7198 if (instance->pd_info)
7199 dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7200 instance->pd_info, instance->pd_info_h);
7202 if (instance->tgt_prop)
7203 dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7204 instance->tgt_prop, instance->tgt_prop_h);
7206 if (instance->crash_dump_buf)
7207 dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7208 instance->crash_dump_buf,
7209 instance->crash_dump_h);
7211 if (instance->snapdump_prop)
7212 dma_free_coherent(&pdev->dev,
7213 sizeof(struct MR_SNAPDUMP_PROPERTIES),
7214 instance->snapdump_prop,
7215 instance->snapdump_prop_h);
7217 if (instance->host_device_list_buf)
7218 dma_free_coherent(&pdev->dev,
7219 HOST_DEVICE_LIST_SZ,
7220 instance->host_device_list_buf,
7221 instance->host_device_list_buf_h);
7226 * megasas_init_ctrl_params - Initialize controller's instance
7227 * parameters before FW init
7228 * @instance - Adapter soft instance
7229 * @return - void
7231 static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7233 instance->fw_crash_state = UNAVAILABLE;
7235 megasas_poll_wait_aen = 0;
7236 instance->issuepend_done = 1;
7237 atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7240 * Initialize locks and queues
7242 INIT_LIST_HEAD(&instance->cmd_pool);
7243 INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7245 atomic_set(&instance->fw_outstanding, 0);
7246 atomic64_set(&instance->total_io_count, 0);
7248 init_waitqueue_head(&instance->int_cmd_wait_q);
7249 init_waitqueue_head(&instance->abort_cmd_wait_q);
7251 spin_lock_init(&instance->crashdump_lock);
7252 spin_lock_init(&instance->mfi_pool_lock);
7253 spin_lock_init(&instance->hba_lock);
7254 spin_lock_init(&instance->stream_lock);
7255 spin_lock_init(&instance->completion_lock);
7257 mutex_init(&instance->reset_mutex);
7259 if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7260 (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7261 instance->flag_ieee = 1;
7263 megasas_dbg_lvl = 0;
7264 instance->flag = 0;
7265 instance->unload = 1;
7266 instance->last_time = 0;
7267 instance->disableOnlineCtrlReset = 1;
7268 instance->UnevenSpanSupport = 0;
7269 instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7270 instance->msix_load_balance = false;
7272 if (instance->adapter_type != MFI_SERIES)
7273 INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7274 else
7275 INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7279 * megasas_probe_one - PCI hotplug entry point
7280 * @pdev: PCI device structure
7281 * @id: PCI ids of supported hotplugged adapter
7283 static int megasas_probe_one(struct pci_dev *pdev,
7284 const struct pci_device_id *id)
7286 int rval, pos;
7287 struct Scsi_Host *host;
7288 struct megasas_instance *instance;
7289 u16 control = 0;
7291 switch (pdev->device) {
7292 case PCI_DEVICE_ID_LSI_AERO_10E0:
7293 case PCI_DEVICE_ID_LSI_AERO_10E3:
7294 case PCI_DEVICE_ID_LSI_AERO_10E4:
7295 case PCI_DEVICE_ID_LSI_AERO_10E7:
7296 dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7297 return 1;
7298 case PCI_DEVICE_ID_LSI_AERO_10E1:
7299 case PCI_DEVICE_ID_LSI_AERO_10E5:
7300 dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7301 break;
7304 /* Reset MSI-X in the kdump kernel */
7305 if (reset_devices) {
7306 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7307 if (pos) {
7308 pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7309 &control);
7310 if (control & PCI_MSIX_FLAGS_ENABLE) {
7311 dev_info(&pdev->dev, "resetting MSI-X\n");
7312 pci_write_config_word(pdev,
7313 pos + PCI_MSIX_FLAGS,
7314 control &
7315 ~PCI_MSIX_FLAGS_ENABLE);
7321 * PCI prepping: enable device, set bus mastering and DMA mask
7323 rval = pci_enable_device_mem(pdev);
7325 if (rval) {
7326 return rval;
7329 pci_set_master(pdev);
7331 host = scsi_host_alloc(&megasas_template,
7332 sizeof(struct megasas_instance));
7334 if (!host) {
7335 dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7336 goto fail_alloc_instance;
7339 instance = (struct megasas_instance *)host->hostdata;
7340 memset(instance, 0, sizeof(*instance));
7341 atomic_set(&instance->fw_reset_no_pci_access, 0);
7344 * Initialize PCI related and misc parameters
7346 instance->pdev = pdev;
7347 instance->host = host;
7348 instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
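/* unique_id packs the PCI bus number into the high byte and the
 * devfn into the low byte. */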
7349 instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7351 megasas_set_adapter_type(instance);
7354 * Initialize MFI Firmware
7356 if (megasas_init_fw(instance))
7357 goto fail_init_mfi;
7359 if (instance->requestorId) {
7360 if (instance->PlasmaFW111) {
7361 instance->vf_affiliation_111 =
7362 dma_alloc_coherent(&pdev->dev,
7363 sizeof(struct MR_LD_VF_AFFILIATION_111),
7364 &instance->vf_affiliation_111_h,
7365 GFP_KERNEL);
7366 if (!instance->vf_affiliation_111)
7367 dev_warn(&pdev->dev, "Can't allocate "
7368 "memory for VF affiliation buffer\n");
7369 } else {
7370 instance->vf_affiliation =
7371 dma_alloc_coherent(&pdev->dev,
7372 (MAX_LOGICAL_DRIVES + 1) *
7373 sizeof(struct MR_LD_VF_AFFILIATION),
7374 &instance->vf_affiliation_h,
7375 GFP_KERNEL);
7376 if (!instance->vf_affiliation)
7377 dev_warn(&pdev->dev, "Can't allocate "
7378 "memory for VF affiliation buffer\n");
7383 * Store instance in PCI softstate
7385 pci_set_drvdata(pdev, instance);
7388 * Add this controller to megasas_mgmt_info structure so that it
7389 * can be exported to management applications
7391 megasas_mgmt_info.count++;
7392 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7393 megasas_mgmt_info.max_index++;
7396 * Register with SCSI mid-layer
7398 if (megasas_io_attach(instance))
7399 goto fail_io_attach;
7401 instance->unload = 0;
7403 * Trigger SCSI to scan our drives
7405 if (!instance->enable_fw_dev_list ||
7406 (instance->host_device_list_buf->count > 0))
7407 scsi_scan_host(host);
7410 * Initiate AEN (Asynchronous Event Notification)
7412 if (megasas_start_aen(instance)) {
7413 dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7414 goto fail_start_aen;
7417 megasas_setup_debugfs(instance);
7419 /* Get current SR-IOV LD/VF affiliation */
7420 if (instance->requestorId)
7421 megasas_get_ld_vf_affiliation(instance, 1);
7423 return 0;
7425 fail_start_aen:
7426 fail_io_attach:
7427 megasas_mgmt_info.count--;
7428 megasas_mgmt_info.max_index--;
7429 megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7431 instance->instancet->disable_intr(instance);
7432 megasas_destroy_irqs(instance);
7434 if (instance->adapter_type != MFI_SERIES)
7435 megasas_release_fusion(instance);
7436 else
7437 megasas_release_mfi(instance);
7438 if (instance->msix_vectors)
7439 pci_free_irq_vectors(instance->pdev);
7440 fail_init_mfi:
7441 scsi_host_put(host);
7442 fail_alloc_instance:
7443 pci_disable_device(pdev);
7445 return -ENODEV;
7449 * megasas_flush_cache - Requests FW to flush all its caches
7450 * @instance: Adapter soft state
7452 static void megasas_flush_cache(struct megasas_instance *instance)
7454 struct megasas_cmd *cmd;
7455 struct megasas_dcmd_frame *dcmd;
7457 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7458 return;
7460 cmd = megasas_get_cmd(instance);
7462 if (!cmd)
7463 return;
7465 dcmd = &cmd->frame->dcmd;
7467 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7469 dcmd->cmd = MFI_CMD_DCMD;
7470 dcmd->cmd_status = 0x0;
7471 dcmd->sge_count = 0;
7472 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7473 dcmd->timeout = 0;
7474 dcmd->pad_0 = 0;
7475 dcmd->data_xfer_len = 0;
7476 dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
7477 dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7479 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7480 != DCMD_SUCCESS) {
7481 dev_err(&instance->pdev->dev,
7482 "return from %s %d\n", __func__, __LINE__);
7483 return;
7486 megasas_return_cmd(instance, cmd);
7490 * megasas_shutdown_controller - Instructs FW to shutdown the controller
7491 * @instance: Adapter soft state
7492 * @opcode: Shutdown/Hibernate
7494 static void megasas_shutdown_controller(struct megasas_instance *instance,
7495 u32 opcode)
7497 struct megasas_cmd *cmd;
7498 struct megasas_dcmd_frame *dcmd;
7500 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7501 return;
7503 cmd = megasas_get_cmd(instance);
7505 if (!cmd)
7506 return;
7508 if (instance->aen_cmd)
7509 megasas_issue_blocked_abort_cmd(instance,
7510 instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7511 if (instance->map_update_cmd)
7512 megasas_issue_blocked_abort_cmd(instance,
7513 instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7514 if (instance->jbod_seq_cmd)
7515 megasas_issue_blocked_abort_cmd(instance,
7516 instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7518 dcmd = &cmd->frame->dcmd;
7520 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7522 dcmd->cmd = MFI_CMD_DCMD;
7523 dcmd->cmd_status = 0x0;
7524 dcmd->sge_count = 0;
7525 dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7526 dcmd->timeout = 0;
7527 dcmd->pad_0 = 0;
7528 dcmd->data_xfer_len = 0;
7529 dcmd->opcode = cpu_to_le32(opcode);
7531 if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7532 != DCMD_SUCCESS) {
7533 dev_err(&instance->pdev->dev,
7534 "return from %s %d\n", __func__, __LINE__);
7535 return;
7538 megasas_return_cmd(instance, cmd);
7541 #ifdef CONFIG_PM
7543 * megasas_suspend - driver suspend entry point
7544 * @pdev: PCI device structure
7545 * @state: PCI power state to suspend routine
7547 static int
7548 megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7550 struct megasas_instance *instance;
7552 instance = pci_get_drvdata(pdev);
7554 if (!instance)
7555 return 0;
7557 instance->unload = 1;
7559 dev_info(&pdev->dev, "%s is called\n", __func__);
7561 /* Shutdown SR-IOV heartbeat timer */
7562 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7563 del_timer_sync(&instance->sriov_heartbeat_timer);
7565 /* Stop the FW fault detection watchdog */
7566 if (instance->adapter_type != MFI_SERIES)
7567 megasas_fusion_stop_watchdog(instance);
7569 megasas_flush_cache(instance);
7570 megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7572 /* cancel the delayed work if this work still in queue */
7573 if (instance->ev != NULL) {
7574 struct megasas_aen_event *ev = instance->ev;
7575 cancel_delayed_work_sync(&ev->hotplug_work);
7576 instance->ev = NULL;
7579 tasklet_kill(&instance->isr_tasklet);
7581 pci_set_drvdata(instance->pdev, instance);
7582 instance->instancet->disable_intr(instance);
7584 megasas_destroy_irqs(instance);
7586 if (instance->msix_vectors)
7587 pci_free_irq_vectors(instance->pdev);
7589 pci_save_state(pdev);
7590 pci_disable_device(pdev);
7592 pci_set_power_state(pdev, pci_choose_state(pdev, state));
7594 return 0;
7598 * megasas_resume- driver resume entry point
7599 * @pdev: PCI device structure
7601 static int
7602 megasas_resume(struct pci_dev *pdev)
7604 int rval;
7605 struct Scsi_Host *host;
7606 struct megasas_instance *instance;
7607 int irq_flags = PCI_IRQ_LEGACY;
7608 u32 status_reg;
7610 instance = pci_get_drvdata(pdev);
7612 if (!instance)
7613 return 0;
7615 host = instance->host;
7616 pci_set_power_state(pdev, PCI_D0);
7617 pci_enable_wake(pdev, PCI_D0, 0);
7618 pci_restore_state(pdev);
7620 dev_info(&pdev->dev, "%s is called\n", __func__);
7622 * PCI prepping: enable device, set bus mastering and DMA mask
7624 rval = pci_enable_device_mem(pdev);
7626 if (rval) {
7627 dev_err(&pdev->dev, "Enable device failed\n");
7628 return rval;
7631 pci_set_master(pdev);
7634 * We expect the FW state to be READY
7637 if (megasas_transition_to_ready(instance, 0)) {
7638 dev_info(&instance->pdev->dev,
7639 "Failed to transition controller to ready from %s!\n",
7640 __func__);
7641 if (instance->adapter_type != MFI_SERIES) {
7642 status_reg =
7643 instance->instancet->read_fw_status_reg(instance);
7644 if (!(status_reg & MFI_RESET_ADAPTER) ||
7645 ((megasas_adp_reset_wait_for_ready
7646 (instance, true, 0)) == FAILED))
7647 goto fail_ready_state;
7648 } else {
7649 atomic_set(&instance->fw_reset_no_pci_access, 1);
7650 instance->instancet->adp_reset
7651 (instance, instance->reg_set);
7652 atomic_set(&instance->fw_reset_no_pci_access, 0);
7654 /* waiting for about 30 seconds before retry */
7655 ssleep(30);
7657 if (megasas_transition_to_ready(instance, 0))
7658 goto fail_ready_state;
7661 dev_info(&instance->pdev->dev,
7662 "FW restarted successfully from %s!\n",
7663 __func__);
7665 if (megasas_set_dma_mask(instance))
7666 goto fail_set_dma_mask;
7669 * Initialize MFI Firmware
7672 atomic_set(&instance->fw_outstanding, 0);
7673 atomic_set(&instance->ldio_outstanding, 0);
7675 /* Now re-enable MSI-X */
7676 if (instance->msix_vectors) {
7677 irq_flags = PCI_IRQ_MSIX;
7678 if (instance->smp_affinity_enable)
7679 irq_flags |= PCI_IRQ_AFFINITY;
7681 rval = pci_alloc_irq_vectors(instance->pdev, 1,
7682 instance->msix_vectors ?
7683 instance->msix_vectors : 1, irq_flags);
7684 if (rval < 0)
7685 goto fail_reenable_msix;
7687 megasas_setup_reply_map(instance);
7689 if (instance->adapter_type != MFI_SERIES) {
7690 megasas_reset_reply_desc(instance);
7691 if (megasas_ioc_init_fusion(instance)) {
7692 megasas_free_cmds(instance);
7693 megasas_free_cmds_fusion(instance);
7694 goto fail_init_mfi;
7696 if (!megasas_get_map_info(instance))
7697 megasas_sync_map_info(instance);
7698 } else {
7699 *instance->producer = 0;
7700 *instance->consumer = 0;
7701 if (megasas_issue_init_mfi(instance))
7702 goto fail_init_mfi;
7705 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7706 goto fail_init_mfi;
7708 tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7709 (unsigned long)instance);
7711 if (instance->msix_vectors ?
7712 megasas_setup_irqs_msix(instance, 0) :
7713 megasas_setup_irqs_ioapic(instance))
7714 goto fail_init_mfi;
7716 if (instance->adapter_type != MFI_SERIES)
7717 megasas_setup_irq_poll(instance);
7719 /* Re-launch SR-IOV heartbeat timer */
7720 if (instance->requestorId) {
7721 if (!megasas_sriov_start_heartbeat(instance, 0))
7722 megasas_start_timer(instance);
7723 else {
7724 instance->skip_heartbeat_timer_del = 1;
7725 goto fail_init_mfi;
7729 instance->instancet->enable_intr(instance);
7730 megasas_setup_jbod_map(instance);
7731 instance->unload = 0;
7734 * Initiate AEN (Asynchronous Event Notification)
7736 if (megasas_start_aen(instance))
7737 dev_err(&instance->pdev->dev, "Start AEN failed\n");
7739 /* Re-launch FW fault watchdog */
7740 if (instance->adapter_type != MFI_SERIES)
7741 if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7742 goto fail_start_watchdog;
7744 return 0;
7746 fail_start_watchdog:
7747 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7748 del_timer_sync(&instance->sriov_heartbeat_timer);
7749 fail_init_mfi:
7750 megasas_free_ctrl_dma_buffers(instance);
7751 megasas_free_ctrl_mem(instance);
7752 scsi_host_put(host);
7754 fail_reenable_msix:
7755 fail_set_dma_mask:
7756 fail_ready_state:
7758 pci_disable_device(pdev);
7760 return -ENODEV;
7762 #else
7763 #define megasas_suspend NULL
7764 #define megasas_resume NULL
7765 #endif
7767 static inline int
7768 megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7769 {
7770 int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7771 int i;
7772 u8 adp_state;
7774 for (i = 0; i < wait_time; i++) {
7775 adp_state = atomic_read(&instance->adprecovery);
7776 if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7777 (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7778 break;
7780 if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7781 dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7783 msleep(1000);
7784 }
7786 if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7787 dev_info(&instance->pdev->dev,
7788 "%s HBA failed to become operational, adp_state %d\n",
7789 __func__, adp_state);
7790 return 1;
7791 }
7793 return 0;
7794 }
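/*
 * The helper above polls adprecovery once per second for up to
 * 2 * MEGASAS_RESET_WAIT_TIME seconds, logging a notice every
 * MEGASAS_RESET_NOTICE_INTERVAL iterations, and returns 0 only once the HBA
 * is operational (1 on timeout or HW critical error).
 */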
7796 /**
7797 * megasas_detach_one - PCI hot-unplug entry point
7798 * @pdev: PCI device structure
7799 */
7800 static void megasas_detach_one(struct pci_dev *pdev)
7801 {
7802 int i;
7803 struct Scsi_Host *host;
7804 struct megasas_instance *instance;
7805 struct fusion_context *fusion;
7806 u32 pd_seq_map_sz;
7808 instance = pci_get_drvdata(pdev);
7810 if (!instance)
7811 return;
7813 host = instance->host;
7814 fusion = instance->ctrl_context;
7816 /* Shutdown SR-IOV heartbeat timer */
7817 if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7818 del_timer_sync(&instance->sriov_heartbeat_timer);
7820 /* Stop the FW fault detection watchdog */
7821 if (instance->adapter_type != MFI_SERIES)
7822 megasas_fusion_stop_watchdog(instance);
7824 if (instance->fw_crash_state != UNAVAILABLE)
7825 megasas_free_host_crash_buffer(instance);
7826 scsi_remove_host(instance->host);
7827 instance->unload = 1;
7829 if (megasas_wait_for_adapter_operational(instance))
7830 goto skip_firing_dcmds;
7832 megasas_flush_cache(instance);
7833 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7835 skip_firing_dcmds:
7836 /* cancel the delayed work if it is still queued */
7837 if (instance->ev != NULL) {
7838 struct megasas_aen_event *ev = instance->ev;
7839 cancel_delayed_work_sync(&ev->hotplug_work);
7840 instance->ev = NULL;
7843 /* cancel all wait events */
7844 wake_up_all(&instance->int_cmd_wait_q);
7846 tasklet_kill(&instance->isr_tasklet);
7848 /*
7849 * Take the instance off the instance array. Note that we will not
7850 * decrement the max_index. We let this array remain a sparse array.
7851 */
7852 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7853 if (megasas_mgmt_info.instance[i] == instance) {
7854 megasas_mgmt_info.count--;
7855 megasas_mgmt_info.instance[i] = NULL;
7857 break;
7861 instance->instancet->disable_intr(instance);
7863 megasas_destroy_irqs(instance);
7865 if (instance->msix_vectors)
7866 pci_free_irq_vectors(instance->pdev);
7868 if (instance->adapter_type >= VENTURA_SERIES) {
7869 for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7870 kfree(fusion->stream_detect_by_ld[i]);
7871 kfree(fusion->stream_detect_by_ld);
7872 fusion->stream_detect_by_ld = NULL;
7876 if (instance->adapter_type != MFI_SERIES) {
7877 megasas_release_fusion(instance);
7878 pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
7879 (sizeof(struct MR_PD_CFG_SEQ) *
7880 (MAX_PHYSICAL_DEVICES - 1));
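/*
 * The size above covers MAX_PHYSICAL_DEVICES sequence entries in total:
 * struct MR_PD_CFG_SEQ_NUM_SYNC already embeds one MR_PD_CFG_SEQ, so only
 * MAX_PHYSICAL_DEVICES - 1 additional entries are added.
 */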
7881 for (i = 0; i < 2 ; i++) {
7882 if (fusion->ld_map[i])
7883 dma_free_coherent(&instance->pdev->dev,
7884 fusion->max_map_sz,
7885 fusion->ld_map[i],
7886 fusion->ld_map_phys[i]);
7887 if (fusion->ld_drv_map[i]) {
7888 if (is_vmalloc_addr(fusion->ld_drv_map[i]))
7889 vfree(fusion->ld_drv_map[i]);
7890 else
7891 free_pages((ulong)fusion->ld_drv_map[i],
7892 fusion->drv_map_pages);
7895 if (fusion->pd_seq_sync[i])
7896 dma_free_coherent(&instance->pdev->dev,
7897 pd_seq_map_sz,
7898 fusion->pd_seq_sync[i],
7899 fusion->pd_seq_phys[i]);
7901 } else {
7902 megasas_release_mfi(instance);
7905 if (instance->vf_affiliation)
7906 dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
7907 sizeof(struct MR_LD_VF_AFFILIATION),
7908 instance->vf_affiliation,
7909 instance->vf_affiliation_h);
7911 if (instance->vf_affiliation_111)
7912 dma_free_coherent(&pdev->dev,
7913 sizeof(struct MR_LD_VF_AFFILIATION_111),
7914 instance->vf_affiliation_111,
7915 instance->vf_affiliation_111_h);
7917 if (instance->hb_host_mem)
7918 dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
7919 instance->hb_host_mem,
7920 instance->hb_host_mem_h);
7922 megasas_free_ctrl_dma_buffers(instance);
7924 megasas_free_ctrl_mem(instance);
7926 megasas_destroy_debugfs(instance);
7928 scsi_host_put(host);
7930 pci_disable_device(pdev);
7931 }
7933 /**
7934 * megasas_shutdown - Shutdown entry point
7935 * @pdev: PCI device structure
7936 */
7937 static void megasas_shutdown(struct pci_dev *pdev)
7938 {
7939 struct megasas_instance *instance = pci_get_drvdata(pdev);
7941 if (!instance)
7942 return;
7944 instance->unload = 1;
7946 if (megasas_wait_for_adapter_operational(instance))
7947 goto skip_firing_dcmds;
7949 megasas_flush_cache(instance);
7950 megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7952 skip_firing_dcmds:
7953 instance->instancet->disable_intr(instance);
7954 megasas_destroy_irqs(instance);
7956 if (instance->msix_vectors)
7957 pci_free_irq_vectors(instance->pdev);
7958 }
7960 /*
7961 * megasas_mgmt_open - char node "open" entry point
7962 */
7963 static int megasas_mgmt_open(struct inode *inode, struct file *filep)
7964 {
7965 /*
7966 * Allow only those users with admin rights
7967 */
7968 if (!capable(CAP_SYS_ADMIN))
7969 return -EACCES;
7971 return 0;
7972 }
7974 /*
7975 * megasas_mgmt_fasync - Async notifier registration from applications
7976 *
7977 * This function adds the calling process to a driver global queue. When an
7978 * event occurs, SIGIO will be sent to all processes in this queue.
7979 */
7980 static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
7981 {
7982 int rc;
7984 mutex_lock(&megasas_async_queue_mutex);
7986 rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
7988 mutex_unlock(&megasas_async_queue_mutex);
7990 if (rc >= 0) {
7991 /* For sanity check when we get ioctl */
7992 filep->private_data = filep;
7993 return 0;
7994 }
7996 printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
7998 return rc;
7999 }
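/*
 * Illustrative user-space sketch (not part of the driver): with fd being an
 * open handle on the driver's ioctl character node, an application arms
 * SIGIO delivery through this hook with
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);
 *
 * Setting FASYNC is what invokes megasas_mgmt_fasync().
 */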
8001 /*
8002 * megasas_mgmt_poll - char node "poll" entry point
8003 */
8004 static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
8005 {
8006 __poll_t mask;
8007 unsigned long flags;
8009 poll_wait(file, &megasas_poll_wait, wait);
8010 spin_lock_irqsave(&poll_aen_lock, flags);
8011 if (megasas_poll_wait_aen)
8012 mask = (EPOLLIN | EPOLLRDNORM);
8013 else
8014 mask = 0;
8015 megasas_poll_wait_aen = 0;
8016 spin_unlock_irqrestore(&poll_aen_lock, flags);
8017 return mask;
8018 }
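/*
 * poll() reports EPOLLIN | EPOLLRDNORM only while an unconsumed AEN is
 * pending; megasas_poll_wait_aen is cleared on each call, so the next poll()
 * waits until another event wakes megasas_poll_wait.
 */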
8021 * megasas_set_crash_dump_params_ioctl:
8022 * Send CRASH_DUMP_MODE DCMD to all controllers
8023 * @cmd: MFI command frame
8026 static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
8028 struct megasas_instance *local_instance;
8029 int i, error = 0;
8030 int crash_support;
8032 crash_support = cmd->frame->dcmd.mbox.w[0];
8034 for (i = 0; i < megasas_mgmt_info.max_index; i++) {
8035 local_instance = megasas_mgmt_info.instance[i];
8036 if (local_instance && local_instance->crash_dump_drv_support) {
8037 if ((atomic_read(&local_instance->adprecovery) ==
8038 MEGASAS_HBA_OPERATIONAL) &&
8039 !megasas_set_crash_dump_params(local_instance,
8040 crash_support)) {
8041 local_instance->crash_dump_app_support =
8042 crash_support;
8043 dev_info(&local_instance->pdev->dev,
8044 "Application firmware crash "
8045 "dump mode set success\n");
8046 error = 0;
8047 } else {
8048 dev_info(&local_instance->pdev->dev,
8049 "Application firmware crash "
8050 "dump mode set failed\n");
8051 error = -1;
8055 return error;
8058 /**
8059 * megasas_mgmt_fw_ioctl - Issues management ioctls to FW
8060 * @instance: Adapter soft state
8061 * @user_ioc: User's ioctl packet
8062 */
8063 static int
8064 megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
8065 struct megasas_iocpacket __user * user_ioc,
8066 struct megasas_iocpacket *ioc)
8067 {
8068 struct megasas_sge64 *kern_sge64 = NULL;
8069 struct megasas_sge32 *kern_sge32 = NULL;
8070 struct megasas_cmd *cmd;
8071 void *kbuff_arr[MAX_IOCTL_SGE];
8072 dma_addr_t buf_handle = 0;
8073 int error = 0, i;
8074 void *sense = NULL;
8075 dma_addr_t sense_handle;
8076 unsigned long *sense_ptr;
8077 u32 opcode = 0;
8078 int ret = DCMD_SUCCESS;
8080 memset(kbuff_arr, 0, sizeof(kbuff_arr));
8082 if (ioc->sge_count > MAX_IOCTL_SGE) {
8083 dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] > max limit [%d]\n",
8084 ioc->sge_count, MAX_IOCTL_SGE);
8085 return -EINVAL;
8088 if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
8089 ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
8090 !instance->support_nvme_passthru) ||
8091 ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
8092 !instance->support_pci_lane_margining)) {
8093 dev_err(&instance->pdev->dev,
8094 "Received invalid ioctl command 0x%x\n",
8095 ioc->frame.hdr.cmd);
8096 return -ENOTSUPP;
8099 cmd = megasas_get_cmd(instance);
8100 if (!cmd) {
8101 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
8102 return -ENOMEM;
8103 }
8105 /*
8106 * User's IOCTL packet has 2 frames (maximum). Copy those two
8107 * frames into our cmd's frames. cmd->frame's context will get
8108 * overwritten when we copy from the user's frames, so it is set
8109 * again right after the copy.
8110 */
8111 memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
8112 cmd->frame->hdr.context = cpu_to_le32(cmd->index);
8113 cmd->frame->hdr.pad_0 = 0;
8115 cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8117 if (instance->consistent_mask_64bit)
8118 cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8119 MFI_FRAME_SENSE64));
8120 else
8121 cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8122 MFI_FRAME_SENSE64));
8124 if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8125 opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8127 if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8128 mutex_lock(&instance->reset_mutex);
8129 if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8130 megasas_return_cmd(instance, cmd);
8131 mutex_unlock(&instance->reset_mutex);
8132 return -1;
8134 mutex_unlock(&instance->reset_mutex);
8137 if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8138 error = megasas_set_crash_dump_params_ioctl(cmd);
8139 megasas_return_cmd(instance, cmd);
8140 return error;
8141 }
8143 /*
8144 * The management interface between applications and the fw uses
8145 * MFI frames. E.g., RAID configuration changes, LD property changes
8146 * etc. are accomplished through different kinds of MFI frames. The
8147 * driver needs to care only about substituting user buffers with
8148 * kernel buffers in SGLs. The location of the SGL is embedded in the
8149 * struct iocpacket itself.
8150 */
8151 if (instance->consistent_mask_64bit)
8152 kern_sge64 = (struct megasas_sge64 *)
8153 ((unsigned long)cmd->frame + ioc->sgl_off);
8154 else
8155 kern_sge32 = (struct megasas_sge32 *)
8156 ((unsigned long)cmd->frame + ioc->sgl_off);
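/*
 * ioc->sgl_off is the byte offset of the SGL within the frame built by the
 * application. The loop below allocates a DMA bounce buffer for every user
 * iovec and rewrites the corresponding SGE to point at that buffer instead.
 */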
8159 * For each user buffer, create a mirror buffer and copy in
8161 for (i = 0; i < ioc->sge_count; i++) {
8162 if (!ioc->sgl[i].iov_len)
8163 continue;
8165 kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8166 ioc->sgl[i].iov_len,
8167 &buf_handle, GFP_KERNEL);
8168 if (!kbuff_arr[i]) {
8169 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8170 "kernel SGL buffer for IOCTL\n");
8171 error = -ENOMEM;
8172 goto out;
8176 * We don't change the dma_coherent_mask, so
8177 * dma_alloc_coherent only returns 32bit addresses
8179 if (instance->consistent_mask_64bit) {
8180 kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8181 kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8182 } else {
8183 kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8184 kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8188 * We created a kernel buffer corresponding to the
8189 * user buffer. Now copy in from the user buffer
8191 if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8192 (u32) (ioc->sgl[i].iov_len))) {
8193 error = -EFAULT;
8194 goto out;
8198 if (ioc->sense_len) {
8199 sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8200 &sense_handle, GFP_KERNEL);
8201 if (!sense) {
8202 error = -ENOMEM;
8203 goto out;
8206 sense_ptr =
8207 (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
8208 if (instance->consistent_mask_64bit)
8209 *sense_ptr = cpu_to_le64(sense_handle);
8210 else
8211 *sense_ptr = cpu_to_le32(sense_handle);
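/*
 * The DMA address of the kernel sense buffer is patched into the frame at
 * ioc->sense_off. After completion, the user-space sense pointer kept in the
 * application's copy of the frame is read back and used as the
 * copy_to_user() destination for the sense data.
 */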
8215 * Set the sync_cmd flag so that the ISR knows not to complete this
8216 * cmd to the SCSI mid-layer
8218 cmd->sync_cmd = 1;
8220 ret = megasas_issue_blocked_cmd(instance, cmd, 0);
8221 switch (ret) {
8222 case DCMD_INIT:
8223 case DCMD_BUSY:
8224 cmd->sync_cmd = 0;
8225 dev_err(&instance->pdev->dev,
8226 "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8227 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8228 cmd->cmd_status_drv);
8229 error = -EBUSY;
8230 goto out;
8233 cmd->sync_cmd = 0;
8235 if (instance->unload == 1) {
8236 dev_info(&instance->pdev->dev, "Driver unload is in progress "
8237 "don't submit data to application\n");
8238 goto out;
8241 * copy out the kernel buffers to user buffers
8243 for (i = 0; i < ioc->sge_count; i++) {
8244 if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8245 ioc->sgl[i].iov_len)) {
8246 error = -EFAULT;
8247 goto out;
8252 * copy out the sense
8254 if (ioc->sense_len) {
8256 * sense_ptr points to the location that has the user
8257 * sense buffer address
8259 sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
8260 ioc->sense_off);
8262 if (copy_to_user((void __user *)((unsigned long)
8263 get_unaligned((unsigned long *)sense_ptr)),
8264 sense, ioc->sense_len)) {
8265 dev_err(&instance->pdev->dev, "Failed to copy out to user "
8266 "sense data\n");
8267 error = -EFAULT;
8268 goto out;
8273 * copy the status codes returned by the fw
8275 if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8276 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8277 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8278 error = -EFAULT;
8281 out:
8282 if (sense) {
8283 dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8284 sense, sense_handle);
8287 for (i = 0; i < ioc->sge_count; i++) {
8288 if (kbuff_arr[i]) {
8289 if (instance->consistent_mask_64bit)
8290 dma_free_coherent(&instance->pdev->dev,
8291 le32_to_cpu(kern_sge64[i].length),
8292 kbuff_arr[i],
8293 le64_to_cpu(kern_sge64[i].phys_addr));
8294 else
8295 dma_free_coherent(&instance->pdev->dev,
8296 le32_to_cpu(kern_sge32[i].length),
8297 kbuff_arr[i],
8298 le32_to_cpu(kern_sge32[i].phys_addr));
8299 kbuff_arr[i] = NULL;
8303 megasas_return_cmd(instance, cmd);
8304 return error;
8307 static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8309 struct megasas_iocpacket __user *user_ioc =
8310 (struct megasas_iocpacket __user *)arg;
8311 struct megasas_iocpacket *ioc;
8312 struct megasas_instance *instance;
8313 int error;
8315 ioc = memdup_user(user_ioc, sizeof(*ioc));
8316 if (IS_ERR(ioc))
8317 return PTR_ERR(ioc);
8319 instance = megasas_lookup_instance(ioc->host_no);
8320 if (!instance) {
8321 error = -ENODEV;
8322 goto out_kfree_ioc;
8325 /* Block ioctls in VF mode */
8326 if (instance->requestorId && !allow_vf_ioctls) {
8327 error = -ENODEV;
8328 goto out_kfree_ioc;
8331 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8332 dev_err(&instance->pdev->dev, "Controller in crit error\n");
8333 error = -ENODEV;
8334 goto out_kfree_ioc;
8337 if (instance->unload == 1) {
8338 error = -ENODEV;
8339 goto out_kfree_ioc;
8342 if (down_interruptible(&instance->ioctl_sem)) {
8343 error = -ERESTARTSYS;
8344 goto out_kfree_ioc;
8347 if (megasas_wait_for_adapter_operational(instance)) {
8348 error = -ENODEV;
8349 goto out_up;
8352 error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8353 out_up:
8354 up(&instance->ioctl_sem);
8356 out_kfree_ioc:
8357 kfree(ioc);
8358 return error;
8361 static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8362 {
8363 struct megasas_instance *instance;
8364 struct megasas_aen aen;
8365 int error;
8367 if (file->private_data != file) {
8368 printk(KERN_DEBUG "megasas: fasync_helper was not "
8369 "called first\n");
8370 return -EINVAL;
8371 }
8373 if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8374 return -EFAULT;
8376 instance = megasas_lookup_instance(aen.host_no);
8378 if (!instance)
8379 return -ENODEV;
8381 if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8382 return -ENODEV;
8383 }
8385 if (instance->unload == 1) {
8386 return -ENODEV;
8387 }
8389 if (megasas_wait_for_adapter_operational(instance))
8390 return -ENODEV;
8392 mutex_lock(&instance->reset_mutex);
8393 error = megasas_register_aen(instance, aen.seq_num,
8394 aen.class_locale_word);
8395 mutex_unlock(&instance->reset_mutex);
8396 return error;
8397 }
8399 /*
8400 * megasas_mgmt_ioctl - char node ioctl entry point
8401 */
8402 static long
8403 megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8404 {
8405 switch (cmd) {
8406 case MEGASAS_IOC_FIRMWARE:
8407 return megasas_mgmt_ioctl_fw(file, arg);
8409 case MEGASAS_IOC_GET_AEN:
8410 return megasas_mgmt_ioctl_aen(file, arg);
8411 }
8413 return -ENOTTY;
8414 }
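/*
 * Illustrative user-space call path (not part of the driver; the device node
 * name depends on how udev creates it, /dev/megaraid_sas_ioctl_node being the
 * conventional choice). ioc is a populated struct megasas_iocpacket and aen a
 * struct megasas_aen:
 *
 *	fd = open("/dev/megaraid_sas_ioctl_node", O_RDWR);
 *	ioctl(fd, MEGASAS_IOC_FIRMWARE, &ioc);
 *	ioctl(fd, MEGASAS_IOC_GET_AEN, &aen);
 */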
8416 #ifdef CONFIG_COMPAT
8417 static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
8418 {
8419 struct compat_megasas_iocpacket __user *cioc =
8420 (struct compat_megasas_iocpacket __user *)arg;
8421 struct megasas_iocpacket __user *ioc =
8422 compat_alloc_user_space(sizeof(struct megasas_iocpacket));
8423 int i;
8424 int error = 0;
8425 compat_uptr_t ptr;
8426 u32 local_sense_off;
8427 u32 local_sense_len;
8428 u32 user_sense_off;
8430 if (clear_user(ioc, sizeof(*ioc)))
8431 return -EFAULT;
8433 if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
8434 copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
8435 copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
8436 copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
8437 copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
8438 copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
8439 return -EFAULT;
8441 /*
8442 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
8443 * sense_len is not null, so prepare the 64bit value under
8444 * the same condition.
8445 */
8446 if (get_user(local_sense_off, &ioc->sense_off) ||
8447 get_user(local_sense_len, &ioc->sense_len) ||
8448 get_user(user_sense_off, &cioc->sense_off))
8449 return -EFAULT;
8451 if (local_sense_off != user_sense_off)
8452 return -EINVAL;
8454 if (local_sense_len) {
8455 void __user **sense_ioc_ptr =
8456 (void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
8457 compat_uptr_t *sense_cioc_ptr =
8458 (compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
8459 if (get_user(ptr, sense_cioc_ptr) ||
8460 put_user(compat_ptr(ptr), sense_ioc_ptr))
8461 return -EFAULT;
8464 for (i = 0; i < MAX_IOCTL_SGE; i++) {
8465 if (get_user(ptr, &cioc->sgl[i].iov_base) ||
8466 put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
8467 copy_in_user(&ioc->sgl[i].iov_len,
8468 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
8469 return -EFAULT;
8472 error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
8474 if (copy_in_user(&cioc->frame.hdr.cmd_status,
8475 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
8476 printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
8477 return -EFAULT;
8479 return error;
8480 }
8482 static long
8483 megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8484 unsigned long arg)
8485 {
8486 switch (cmd) {
8487 case MEGASAS_IOC_FIRMWARE32:
8488 return megasas_mgmt_compat_ioctl_fw(file, arg);
8489 case MEGASAS_IOC_GET_AEN:
8490 return megasas_mgmt_ioctl_aen(file, arg);
8491 }
8493 return -ENOTTY;
8494 }
8495 #endif
8497 /*
8498 * File operations structure for management interface
8499 */
8500 static const struct file_operations megasas_mgmt_fops = {
8501 .owner = THIS_MODULE,
8502 .open = megasas_mgmt_open,
8503 .fasync = megasas_mgmt_fasync,
8504 .unlocked_ioctl = megasas_mgmt_ioctl,
8505 .poll = megasas_mgmt_poll,
8506 #ifdef CONFIG_COMPAT
8507 .compat_ioctl = megasas_mgmt_compat_ioctl,
8508 #endif
8509 .llseek = noop_llseek,
8510 };
8512 /*
8513 * PCI hotplug support registration structure
8514 */
8515 static struct pci_driver megasas_pci_driver = {
8517 .name = "megaraid_sas",
8518 .id_table = megasas_pci_table,
8519 .probe = megasas_probe_one,
8520 .remove = megasas_detach_one,
8521 .suspend = megasas_suspend,
8522 .resume = megasas_resume,
8523 .shutdown = megasas_shutdown,
8524 };
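/*
 * .shutdown is invoked at system shutdown/reboot and for kexec:
 * megasas_shutdown() above flushes the controller cache and sends
 * MR_DCMD_CTRL_SHUTDOWN before interrupts and MSI-X vectors are torn down.
 */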
8526 /*
8527 * Sysfs driver attributes
8528 */
8529 static ssize_t version_show(struct device_driver *dd, char *buf)
8530 {
8531 return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8532 MEGASAS_VERSION);
8533 }
8534 static DRIVER_ATTR_RO(version);
8536 static ssize_t release_date_show(struct device_driver *dd, char *buf)
8537 {
8538 return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8539 MEGASAS_RELDATE);
8540 }
8541 static DRIVER_ATTR_RO(release_date);
8543 static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8544 {
8545 return sprintf(buf, "%u\n", support_poll_for_event);
8546 }
8547 static DRIVER_ATTR_RO(support_poll_for_event);
8549 static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8550 {
8551 return sprintf(buf, "%u\n", support_device_change);
8552 }
8553 static DRIVER_ATTR_RO(support_device_change);
8555 static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8556 {
8557 return sprintf(buf, "%u\n", megasas_dbg_lvl);
8558 }
8560 static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8561 size_t count)
8562 {
8563 int retval = count;
8565 if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8566 printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8567 retval = -EINVAL;
8568 }
8569 return retval;
8570 }
8571 static DRIVER_ATTR_RW(dbg_lvl);
8573 static ssize_t
8574 support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8575 {
8576 return sprintf(buf, "%u\n", support_nvme_encapsulation);
8577 }
8579 static DRIVER_ATTR_RO(support_nvme_encapsulation);
8581 static ssize_t
8582 support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8583 {
8584 return sprintf(buf, "%u\n", support_pci_lane_margining);
8585 }
8587 static DRIVER_ATTR_RO(support_pci_lane_margining);
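/*
 * These driver attributes typically show up under
 * /sys/bus/pci/drivers/megaraid_sas/, e.g.:
 *
 *	cat /sys/bus/pci/drivers/megaraid_sas/version
 *	echo 1 > /sys/bus/pci/drivers/megaraid_sas/dbg_lvl
 */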
8589 static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8590 {
8591 sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8592 scsi_remove_device(sdev);
8593 scsi_device_put(sdev);
8594 }
8596 /**
8597 * megasas_update_device_list - Update the PD and LD device list from FW
8598 * after an AEN event notification
8599 * @instance: Adapter soft state
8600 * @event_type: Indicates type of event (PD or LD event)
8601 *
8602 * @return: Success or failure
8603 *
8604 * Issue DCMDs to Firmware to update the internal device list in the driver.
8605 * Based on FW support, the driver sends either the HOST_DEVICE_LIST DCMD or a
8606 * combination of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8607 */
8608 static
8609 int megasas_update_device_list(struct megasas_instance *instance,
8610 int event_type)
8611 {
8612 int dcmd_ret = DCMD_SUCCESS;
8614 if (instance->enable_fw_dev_list) {
8615 dcmd_ret = megasas_host_device_list_query(instance, false);
8616 if (dcmd_ret != DCMD_SUCCESS)
8617 goto out;
8618 } else {
8619 if (event_type & SCAN_PD_CHANNEL) {
8620 dcmd_ret = megasas_get_pd_list(instance);
8622 if (dcmd_ret != DCMD_SUCCESS)
8623 goto out;
8626 if (event_type & SCAN_VD_CHANNEL) {
8627 if (!instance->requestorId ||
8628 (instance->requestorId &&
8629 megasas_get_ld_vf_affiliation(instance, 0))) {
8630 dcmd_ret = megasas_ld_list_query(instance,
8631 MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8632 if (dcmd_ret != DCMD_SUCCESS)
8633 goto out;
8638 out:
8639 return dcmd_ret;
8640 }
8642 /**
8643 * megasas_add_remove_devices - Add/remove devices to SCSI mid-layer
8644 * after an AEN event notification
8645 * @instance: Adapter soft state
8646 * @scan_type: Indicates type of devices (PD/LD) to add
8647 * @return void
8648 */
8649 static
8650 void megasas_add_remove_devices(struct megasas_instance *instance,
8651 int scan_type)
8652 {
8653 int i, j;
8654 u16 pd_index = 0;
8655 u16 ld_index = 0;
8656 u16 channel = 0, id = 0;
8657 struct Scsi_Host *host;
8658 struct scsi_device *sdev1;
8659 struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8660 struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8662 host = instance->host;
8664 if (instance->enable_fw_dev_list) {
8665 targetid_list = instance->host_device_list_buf;
8666 for (i = 0; i < targetid_list->count; i++) {
8667 targetid_entry = &targetid_list->host_device_list[i];
8668 if (targetid_entry->flags.u.bits.is_sys_pd) {
8669 channel = le16_to_cpu(targetid_entry->target_id) /
8670 MEGASAS_MAX_DEV_PER_CHANNEL;
8671 id = le16_to_cpu(targetid_entry->target_id) %
8672 MEGASAS_MAX_DEV_PER_CHANNEL;
8673 } else {
8674 channel = MEGASAS_MAX_PD_CHANNELS +
8675 (le16_to_cpu(targetid_entry->target_id) /
8676 MEGASAS_MAX_DEV_PER_CHANNEL);
8677 id = le16_to_cpu(targetid_entry->target_id) %
8678 MEGASAS_MAX_DEV_PER_CHANNEL;
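/*
 * Firmware target IDs are folded into the host's (channel, id) space:
 * channel = target_id / MEGASAS_MAX_DEV_PER_CHANNEL for system PDs, the same
 * plus MEGASAS_MAX_PD_CHANNELS for LDs, and id = target_id %
 * MEGASAS_MAX_DEV_PER_CHANNEL in both cases.
 */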
8680 sdev1 = scsi_device_lookup(host, channel, id, 0);
8681 if (!sdev1) {
8682 scsi_add_device(host, channel, id, 0);
8683 } else {
8684 scsi_device_put(sdev1);
8689 if (scan_type & SCAN_PD_CHANNEL) {
8690 for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8691 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8692 pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8693 sdev1 = scsi_device_lookup(host, i, j, 0);
8694 if (instance->pd_list[pd_index].driveState ==
8695 MR_PD_STATE_SYSTEM) {
8696 if (!sdev1)
8697 scsi_add_device(host, i, j, 0);
8698 else
8699 scsi_device_put(sdev1);
8700 } else {
8701 if (sdev1)
8702 megasas_remove_scsi_device(sdev1);
8708 if (scan_type & SCAN_VD_CHANNEL) {
8709 for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
8710 for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8711 ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
8712 sdev1 = scsi_device_lookup(host,
8713 MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8714 if (instance->ld_ids[ld_index] != 0xff) {
8715 if (!sdev1)
8716 scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8717 else
8718 scsi_device_put(sdev1);
8719 } else {
8720 if (sdev1)
8721 megasas_remove_scsi_device(sdev1);
8729 static void
8730 megasas_aen_polling(struct work_struct *work)
8732 struct megasas_aen_event *ev =
8733 container_of(work, struct megasas_aen_event, hotplug_work.work);
8734 struct megasas_instance *instance = ev->instance;
8735 union megasas_evt_class_locale class_locale;
8736 int event_type = 0;
8737 u32 seq_num;
8738 int error;
8739 u8 dcmd_ret = DCMD_SUCCESS;
8741 if (!instance) {
8742 printk(KERN_ERR "invalid instance!\n");
8743 kfree(ev);
8744 return;
8747 /* Don't run the event workqueue thread if OCR is running */
8748 mutex_lock(&instance->reset_mutex);
8750 instance->ev = NULL;
8751 if (instance->evt_detail) {
8752 megasas_decode_evt(instance);
8754 switch (le32_to_cpu(instance->evt_detail->code)) {
8756 case MR_EVT_PD_INSERTED:
8757 case MR_EVT_PD_REMOVED:
8758 event_type = SCAN_PD_CHANNEL;
8759 break;
8761 case MR_EVT_LD_OFFLINE:
8762 case MR_EVT_CFG_CLEARED:
8763 case MR_EVT_LD_DELETED:
8764 case MR_EVT_LD_CREATED:
8765 event_type = SCAN_VD_CHANNEL;
8766 break;
8768 case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
8769 case MR_EVT_FOREIGN_CFG_IMPORTED:
8770 case MR_EVT_LD_STATE_CHANGE:
8771 event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
8772 dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
8773 instance->host->host_no);
8774 break;
8776 case MR_EVT_CTRL_PROP_CHANGED:
8777 dcmd_ret = megasas_get_ctrl_info(instance);
8778 if (dcmd_ret == DCMD_SUCCESS &&
8779 instance->snapdump_wait_time) {
8780 megasas_get_snapdump_properties(instance);
8781 dev_info(&instance->pdev->dev,
8782 "Snap dump wait time\t: %d\n",
8783 instance->snapdump_wait_time);
8785 break;
8786 default:
8787 event_type = 0;
8788 break;
8790 } else {
8791 dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
8792 mutex_unlock(&instance->reset_mutex);
8793 kfree(ev);
8794 return;
8797 if (event_type)
8798 dcmd_ret = megasas_update_device_list(instance, event_type);
8800 mutex_unlock(&instance->reset_mutex);
8802 if (event_type && dcmd_ret == DCMD_SUCCESS)
8803 megasas_add_remove_devices(instance, event_type);
8805 if (dcmd_ret == DCMD_SUCCESS)
8806 seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
8807 else
8808 seq_num = instance->last_seq_num;
8810 /* Register AEN with FW for latest sequence number plus 1 */
8811 class_locale.members.reserved = 0;
8812 class_locale.members.locale = MR_EVT_LOCALE_ALL;
8813 class_locale.members.class = MR_EVT_CLASS_DEBUG;
8815 if (instance->aen_cmd != NULL) {
8816 kfree(ev);
8817 return;
8820 mutex_lock(&instance->reset_mutex);
8821 error = megasas_register_aen(instance, seq_num,
8822 class_locale.word);
8823 if (error)
8824 dev_err(&instance->pdev->dev,
8825 "register aen failed error %x\n", error);
8827 mutex_unlock(&instance->reset_mutex);
8828 kfree(ev);
8829 }
8831 /*
8832 * megasas_init - Driver load entry point
8833 */
8834 static int __init megasas_init(void)
8835 {
8836 int rval;
8838 /*
8839 * When booted in a kdump kernel, minimize the memory footprint by
8840 * disabling a few features
8841 */
8842 if (reset_devices) {
8843 msix_vectors = 1;
8844 rdpq_enable = 0;
8845 dual_qdepth_disable = 1;
8849 * Announce driver version and other information
8851 pr_info("megasas: %s\n", MEGASAS_VERSION);
8853 spin_lock_init(&poll_aen_lock);
8855 support_poll_for_event = 2;
8856 support_device_change = 1;
8857 support_nvme_encapsulation = true;
8858 support_pci_lane_margining = true;
8860 memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
8863 * Register character device node
8865 rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
8867 if (rval < 0) {
8868 printk(KERN_DEBUG "megasas: failed to open device node\n");
8869 return rval;
8872 megasas_mgmt_majorno = rval;
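/*
 * register_chrdev() was called with major 0, i.e. a dynamically allocated
 * major number; the returned major is saved here so megasas_exit() can hand
 * the same value back to unregister_chrdev().
 */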
8874 megasas_init_debugfs();
8877 * Register ourselves as PCI hotplug module
8879 rval = pci_register_driver(&megasas_pci_driver);
8881 if (rval) {
8882 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n");
8883 goto err_pcidrv;
8886 if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
8887 (event_log_level > MFI_EVT_CLASS_DEAD)) {
8888 pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
8889 event_log_level = MFI_EVT_CLASS_CRITICAL;
8892 rval = driver_create_file(&megasas_pci_driver.driver,
8893 &driver_attr_version);
8894 if (rval)
8895 goto err_dcf_attr_ver;
8897 rval = driver_create_file(&megasas_pci_driver.driver,
8898 &driver_attr_release_date);
8899 if (rval)
8900 goto err_dcf_rel_date;
8902 rval = driver_create_file(&megasas_pci_driver.driver,
8903 &driver_attr_support_poll_for_event);
8904 if (rval)
8905 goto err_dcf_support_poll_for_event;
8907 rval = driver_create_file(&megasas_pci_driver.driver,
8908 &driver_attr_dbg_lvl);
8909 if (rval)
8910 goto err_dcf_dbg_lvl;
8911 rval = driver_create_file(&megasas_pci_driver.driver,
8912 &driver_attr_support_device_change);
8913 if (rval)
8914 goto err_dcf_support_device_change;
8916 rval = driver_create_file(&megasas_pci_driver.driver,
8917 &driver_attr_support_nvme_encapsulation);
8918 if (rval)
8919 goto err_dcf_support_nvme_encapsulation;
8921 rval = driver_create_file(&megasas_pci_driver.driver,
8922 &driver_attr_support_pci_lane_margining);
8923 if (rval)
8924 goto err_dcf_support_pci_lane_margining;
8926 return rval;
8928 err_dcf_support_pci_lane_margining:
8929 driver_remove_file(&megasas_pci_driver.driver,
8930 &driver_attr_support_nvme_encapsulation);
8932 err_dcf_support_nvme_encapsulation:
8933 driver_remove_file(&megasas_pci_driver.driver,
8934 &driver_attr_support_device_change);
8936 err_dcf_support_device_change:
8937 driver_remove_file(&megasas_pci_driver.driver,
8938 &driver_attr_dbg_lvl);
8939 err_dcf_dbg_lvl:
8940 driver_remove_file(&megasas_pci_driver.driver,
8941 &driver_attr_support_poll_for_event);
8942 err_dcf_support_poll_for_event:
8943 driver_remove_file(&megasas_pci_driver.driver,
8944 &driver_attr_release_date);
8945 err_dcf_rel_date:
8946 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
8947 err_dcf_attr_ver:
8948 pci_unregister_driver(&megasas_pci_driver);
8949 err_pcidrv:
8950 megasas_exit_debugfs();
8951 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
8952 return rval;
8953 }
8955 /*
8956 * megasas_exit - Driver unload entry point
8957 */
8958 static void __exit megasas_exit(void)
8959 {
8960 driver_remove_file(&megasas_pci_driver.driver,
8961 &driver_attr_dbg_lvl);
8962 driver_remove_file(&megasas_pci_driver.driver,
8963 &driver_attr_support_poll_for_event);
8964 driver_remove_file(&megasas_pci_driver.driver,
8965 &driver_attr_support_device_change);
8966 driver_remove_file(&megasas_pci_driver.driver,
8967 &driver_attr_release_date);
8968 driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
8969 driver_remove_file(&megasas_pci_driver.driver,
8970 &driver_attr_support_nvme_encapsulation);
8971 driver_remove_file(&megasas_pci_driver.driver,
8972 &driver_attr_support_pci_lane_margining);
8974 pci_unregister_driver(&megasas_pci_driver);
8975 megasas_exit_debugfs();
8976 unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
8977 }
8979 module_init(megasas_init);
8980 module_exit(megasas_exit);