Linux 5.1.15: drivers/scsi/mpt3sas/mpt3sas_scsih.c (blob 1ccfbc7eebe0323ce88b1c450e52bb87aba3c45e)
1 /*
2 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
4 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
5 * Copyright (C) 2012-2014 LSI Corporation
6 * Copyright (C) 2013-2014 Avago Technologies
7 * (mailto: MPT-FusionLinux.pdl@avagotech.com)
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
42 * USA.
45 #include <linux/module.h>
46 #include <linux/kernel.h>
47 #include <linux/init.h>
48 #include <linux/errno.h>
49 #include <linux/blkdev.h>
50 #include <linux/sched.h>
51 #include <linux/workqueue.h>
52 #include <linux/delay.h>
53 #include <linux/pci.h>
54 #include <linux/pci-aspm.h>
55 #include <linux/interrupt.h>
56 #include <linux/aer.h>
57 #include <linux/raid_class.h>
58 #include <asm/unaligned.h>
60 #include "mpt3sas_base.h"
62 #define RAID_CHANNEL 1
64 #define PCIE_CHANNEL 2
66 /* forward proto's */
67 static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
68 struct _sas_node *sas_expander);
69 static void _firmware_event_work(struct work_struct *work);
71 static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
72 struct _sas_device *sas_device);
73 static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
74 u8 retry_count, u8 is_pd);
75 static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
76 static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
77 struct _pcie_device *pcie_device);
78 static void
79 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
80 static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
82 /* global parameters */
83 LIST_HEAD(mpt3sas_ioc_list);
84 /* global ioc lock for list operations */
85 DEFINE_SPINLOCK(gioc_lock);
87 MODULE_AUTHOR(MPT3SAS_AUTHOR);
88 MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
89 MODULE_LICENSE("GPL");
90 MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
91 MODULE_ALIAS("mpt2sas");
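/*
 * Added commentary (not in the original source): the "mpt2sas" alias lets
 * this merged driver also bind when userspace requests the legacy mpt2sas
 * module name; which HBA generations are actually enumerated is controlled
 * by the hbas_to_enumerate parameter defined below.
 */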
93 /* local parameters */
94 static u8 scsi_io_cb_idx = -1;
95 static u8 tm_cb_idx = -1;
96 static u8 ctl_cb_idx = -1;
97 static u8 base_cb_idx = -1;
98 static u8 port_enable_cb_idx = -1;
99 static u8 transport_cb_idx = -1;
100 static u8 scsih_cb_idx = -1;
101 static u8 config_cb_idx = -1;
102 static int mpt2_ids;
103 static int mpt3_ids;
105 static u8 tm_tr_cb_idx = -1 ;
106 static u8 tm_tr_volume_cb_idx = -1 ;
107 static u8 tm_sas_control_cb_idx = -1;
109 /* command line options */
110 static u32 logging_level;
111 MODULE_PARM_DESC(logging_level,
112 " bits for enabling additional logging info (default=0)");
115 static ushort max_sectors = 0xFFFF;
116 module_param(max_sectors, ushort, 0);
117 MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
120 static int missing_delay[2] = {-1, -1};
121 module_param_array(missing_delay, int, NULL, 0);
122 MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
124 /* scsi-mid layer global parameter is max_report_luns, which is 511 */
125 #define MPT3SAS_MAX_LUN (16895)
126 static u64 max_lun = MPT3SAS_MAX_LUN;
127 module_param(max_lun, ullong, 0);
128 MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
130 static ushort hbas_to_enumerate;
131 module_param(hbas_to_enumerate, ushort, 0);
132 MODULE_PARM_DESC(hbas_to_enumerate,
133 " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
134 1 - enumerates only SAS 2.0 generation HBAs\n \
135 2 - enumerates only SAS 3.0 generation HBAs (default=0)");
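/*
 * Usage sketch (added commentary, values are illustrative only): module
 * parameters such as hbas_to_enumerate and max_sectors are normally
 * supplied at load time, e.g.:
 *
 *   modprobe mpt3sas hbas_to_enumerate=2 max_sectors=8192
 *
 * Valid ranges and meanings are the ones given in the MODULE_PARM_DESC
 * strings above.
 */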
137 /* diag_buffer_enable is bitwise
138 * bit 0 set = TRACE
139 * bit 1 set = SNAPSHOT
140 * bit 2 set = EXTENDED
142 * Any of these bits can be set, alone or in combination
144 static int diag_buffer_enable = -1;
145 module_param(diag_buffer_enable, int, 0);
146 MODULE_PARM_DESC(diag_buffer_enable,
147 " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
148 static int disable_discovery = -1;
149 module_param(disable_discovery, int, 0);
150 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
153 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
154 static int prot_mask = -1;
155 module_param(prot_mask, int, 0);
156 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
159 /* raid transport support */
160 static struct raid_template *mpt3sas_raid_template;
161 static struct raid_template *mpt2sas_raid_template;
165 * struct sense_info - common structure for obtaining sense keys
166 * @skey: sense key
167 * @asc: additional sense code
168 * @ascq: additional sense code qualifier
170 struct sense_info {
171 u8 skey;
172 u8 asc;
173 u8 ascq;
176 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
177 #define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
178 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
179 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
180 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
182 * struct fw_event_work - firmware event struct
183 * @list: linked list entry
184 * @work: work object (ioc->fault_reset_work_q)
185 * @ioc: per adapter object
186 * @device_handle: device handle
187 * @VF_ID: virtual function id
188 * @VP_ID: virtual port id
189 * @ignore: flag meaning this event has been marked to ignore
190 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
191 * @refcount: kref for this event
192 * @event_data: reply event data payload follows
194 * This object is stored on ioc->fw_event_list.
196 struct fw_event_work {
197 struct list_head list;
198 struct work_struct work;
200 struct MPT3SAS_ADAPTER *ioc;
201 u16 device_handle;
202 u8 VF_ID;
203 u8 VP_ID;
204 u8 ignore;
205 u16 event;
206 struct kref refcount;
207 char event_data[0] __aligned(4);
210 static void fw_event_work_free(struct kref *r)
212 kfree(container_of(r, struct fw_event_work, refcount));
215 static void fw_event_work_get(struct fw_event_work *fw_work)
217 kref_get(&fw_work->refcount);
220 static void fw_event_work_put(struct fw_event_work *fw_work)
222 kref_put(&fw_work->refcount, fw_event_work_free);
225 static struct fw_event_work *alloc_fw_event_work(int len)
227 struct fw_event_work *fw_event;
229 fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
230 if (!fw_event)
231 return NULL;
233 kref_init(&fw_event->refcount);
234 return fw_event;
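/*
 * Reference-counting note (added commentary): kref_init() above leaves the
 * freshly allocated event with a refcount of 1 owned by the caller;
 * fw_event_work_get()/fw_event_work_put() take and drop additional
 * references, and fw_event_work_free() runs only when the last reference
 * is dropped, freeing the struct together with its trailing event_data[]
 * payload in a single kfree().
 */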
238 * struct _scsi_io_transfer - scsi io transfer
239 * @handle: sas device handle (assigned by firmware)
240 * @is_raid: flag set for hidden raid components
241 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
242 * @data_length: data transfer length
243 * @data_dma: dma pointer to data
244 * @sense: sense data
245 * @lun: lun number
246 * @cdb_length: cdb length
247 * @cdb: cdb contents
248 * @timeout: timeout for this command
249 * @VF_ID: virtual function id
250 * @VP_ID: virtual port id
251 * @valid_reply: flag set for reply message
252 * @sense_length: sense length
253 * @ioc_status: ioc status
254 * @scsi_state: scsi state
255 * @scsi_status: scsi status
256 * @log_info: log information
257 * @transfer_length: data length transfer when there is a reply message
259 * Used for sending internal scsi commands to devices within this module.
260 * Refer to _scsi_send_scsi_io().
262 struct _scsi_io_transfer {
263 u16 handle;
264 u8 is_raid;
265 enum dma_data_direction dir;
266 u32 data_length;
267 dma_addr_t data_dma;
268 u8 sense[SCSI_SENSE_BUFFERSIZE];
269 u32 lun;
270 u8 cdb_length;
271 u8 cdb[32];
272 u8 timeout;
273 u8 VF_ID;
274 u8 VP_ID;
275 u8 valid_reply;
276 /* the following bits are only valid when 'valid_reply = 1' */
277 u32 sense_length;
278 u16 ioc_status;
279 u8 scsi_state;
280 u8 scsi_status;
281 u32 log_info;
282 u32 transfer_length;
286 * _scsih_set_debug_level - global setting of ioc->logging_level.
287 * @val: ?
288 * @kp: ?
290 * Note: The logging levels are defined in mpt3sas_debug.h.
292 static int
293 _scsih_set_debug_level(const char *val, const struct kernel_param *kp)
295 int ret = param_set_int(val, kp);
296 struct MPT3SAS_ADAPTER *ioc;
298 if (ret)
299 return ret;
301 pr_info("setting logging_level(0x%08x)\n", logging_level);
302 spin_lock(&gioc_lock);
303 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
304 ioc->logging_level = logging_level;
305 spin_unlock(&gioc_lock);
306 return 0;
308 module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
309 &logging_level, 0644);
312 * _scsih_srch_boot_sas_address - search based on sas_address
313 * @sas_address: sas address
314 * @boot_device: boot device object from bios page 2
316 * Return: 1 when there's a match, 0 means no match.
318 static inline int
319 _scsih_srch_boot_sas_address(u64 sas_address,
320 Mpi2BootDeviceSasWwid_t *boot_device)
322 return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
326 * _scsih_srch_boot_device_name - search based on device name
327 * @device_name: device name specified in IDENTIFY frame
328 * @boot_device: boot device object from bios page 2
330 * Return: 1 when there's a match, 0 means no match.
332 static inline int
333 _scsih_srch_boot_device_name(u64 device_name,
334 Mpi2BootDeviceDeviceName_t *boot_device)
336 return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
340 * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
341 * @enclosure_logical_id: enclosure logical id
342 * @slot_number: slot number
343 * @boot_device: boot device object from bios page 2
345 * Return: 1 when there's a match, 0 means no match.
347 static inline int
348 _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
349 Mpi2BootDeviceEnclosureSlot_t *boot_device)
351 return (enclosure_logical_id == le64_to_cpu(boot_device->
352 EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
353 SlotNumber)) ? 1 : 0;
357 * _scsih_is_boot_device - search for matching boot device.
358 * @sas_address: sas address
359 * @device_name: device name specified in IDENTIFY frame
360 * @enclosure_logical_id: enclosure logical id
361 * @slot: slot number
362 * @form: specifies boot device form
363 * @boot_device: boot device object from bios page 2
365 * Return: 1 when there's a match, 0 means no match.
367 static int
368 _scsih_is_boot_device(u64 sas_address, u64 device_name,
369 u64 enclosure_logical_id, u16 slot, u8 form,
370 Mpi2BiosPage2BootDevice_t *boot_device)
372 int rc = 0;
374 switch (form) {
375 case MPI2_BIOSPAGE2_FORM_SAS_WWID:
376 if (!sas_address)
377 break;
378 rc = _scsih_srch_boot_sas_address(
379 sas_address, &boot_device->SasWwid);
380 break;
381 case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
382 if (!enclosure_logical_id)
383 break;
384 rc = _scsih_srch_boot_encl_slot(
385 enclosure_logical_id,
386 slot, &boot_device->EnclosureSlot);
387 break;
388 case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
389 if (!device_name)
390 break;
391 rc = _scsih_srch_boot_device_name(
392 device_name, &boot_device->DeviceName);
393 break;
394 case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
395 break;
398 return rc;
402 * _scsih_get_sas_address - set the sas_address for given device handle
403 * @ioc: ?
404 * @handle: device handle
405 * @sas_address: sas address
407 * Return: 0 success, non-zero when failure
409 static int
410 _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
411 u64 *sas_address)
413 Mpi2SasDevicePage0_t sas_device_pg0;
414 Mpi2ConfigReply_t mpi_reply;
415 u32 ioc_status;
417 *sas_address = 0;
419 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
420 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
421 ioc_err(ioc, "failure at %s:%d/%s()!\n",
422 __FILE__, __LINE__, __func__);
423 return -ENXIO;
426 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
427 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
428 /* For HBA, vSES doesn't return HBA SAS address. Instead return
429 * vSES's sas address.
431 if ((handle <= ioc->sas_hba.num_phys) &&
432 (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
433 MPI2_SAS_DEVICE_INFO_SEP)))
434 *sas_address = ioc->sas_hba.sas_address;
435 else
436 *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
437 return 0;
440 /* we hit this because the given parent handle doesn't exist */
441 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
442 return -ENXIO;
444 /* else error case */
445 ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
446 handle, ioc_status, __FILE__, __LINE__, __func__);
447 return -EIO;
451 * _scsih_determine_boot_device - determine boot device.
452 * @ioc: per adapter object
453 * @device: sas_device or pcie_device object
454 * @channel: SAS or PCIe channel
456 * Determines whether this device should be the first device reported to
457 * scsi-ml or the sas transport; this is used for persistent boot device
458 * support. There are primary, alternate, and current entries in bios page 2.
459 * The order of priority is primary, alternate, then current. This routine
460 * saves the corresponding device object.
461 * The saved data is used later in _scsih_probe_boot_devices().
463 static void
464 _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
465 u32 channel)
467 struct _sas_device *sas_device;
468 struct _pcie_device *pcie_device;
469 struct _raid_device *raid_device;
470 u64 sas_address;
471 u64 device_name;
472 u64 enclosure_logical_id;
473 u16 slot;
475 /* only process this function when driver loads */
476 if (!ioc->is_driver_loading)
477 return;
479 /* no Bios, return immediately */
480 if (!ioc->bios_pg3.BiosVersion)
481 return;
483 if (channel == RAID_CHANNEL) {
484 raid_device = device;
485 sas_address = raid_device->wwid;
486 device_name = 0;
487 enclosure_logical_id = 0;
488 slot = 0;
489 } else if (channel == PCIE_CHANNEL) {
490 pcie_device = device;
491 sas_address = pcie_device->wwid;
492 device_name = 0;
493 enclosure_logical_id = 0;
494 slot = 0;
495 } else {
496 sas_device = device;
497 sas_address = sas_device->sas_address;
498 device_name = sas_device->device_name;
499 enclosure_logical_id = sas_device->enclosure_logical_id;
500 slot = sas_device->slot;
503 if (!ioc->req_boot_device.device) {
504 if (_scsih_is_boot_device(sas_address, device_name,
505 enclosure_logical_id, slot,
506 (ioc->bios_pg2.ReqBootDeviceForm &
507 MPI2_BIOSPAGE2_FORM_MASK),
508 &ioc->bios_pg2.RequestedBootDevice)) {
509 dinitprintk(ioc,
510 ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
511 __func__, (u64)sas_address));
512 ioc->req_boot_device.device = device;
513 ioc->req_boot_device.channel = channel;
517 if (!ioc->req_alt_boot_device.device) {
518 if (_scsih_is_boot_device(sas_address, device_name,
519 enclosure_logical_id, slot,
520 (ioc->bios_pg2.ReqAltBootDeviceForm &
521 MPI2_BIOSPAGE2_FORM_MASK),
522 &ioc->bios_pg2.RequestedAltBootDevice)) {
523 dinitprintk(ioc,
524 ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
525 __func__, (u64)sas_address));
526 ioc->req_alt_boot_device.device = device;
527 ioc->req_alt_boot_device.channel = channel;
531 if (!ioc->current_boot_device.device) {
532 if (_scsih_is_boot_device(sas_address, device_name,
533 enclosure_logical_id, slot,
534 (ioc->bios_pg2.CurrentBootDeviceForm &
535 MPI2_BIOSPAGE2_FORM_MASK),
536 &ioc->bios_pg2.CurrentBootDevice)) {
537 dinitprintk(ioc,
538 ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
539 __func__, (u64)sas_address));
540 ioc->current_boot_device.device = device;
541 ioc->current_boot_device.channel = channel;
546 static struct _sas_device *
547 __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
548 struct MPT3SAS_TARGET *tgt_priv)
550 struct _sas_device *ret;
552 assert_spin_locked(&ioc->sas_device_lock);
554 ret = tgt_priv->sas_dev;
555 if (ret)
556 sas_device_get(ret);
558 return ret;
561 static struct _sas_device *
562 mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
563 struct MPT3SAS_TARGET *tgt_priv)
565 struct _sas_device *ret;
566 unsigned long flags;
568 spin_lock_irqsave(&ioc->sas_device_lock, flags);
569 ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
570 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
572 return ret;
575 static struct _pcie_device *
576 __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
577 struct MPT3SAS_TARGET *tgt_priv)
579 struct _pcie_device *ret;
581 assert_spin_locked(&ioc->pcie_device_lock);
583 ret = tgt_priv->pcie_dev;
584 if (ret)
585 pcie_device_get(ret);
587 return ret;
591 * mpt3sas_get_pdev_from_target - pcie device search
592 * @ioc: per adapter object
593 * @tgt_priv: starget private object
595 * Context: This function will acquire ioc->pcie_device_lock and will release
596 * it before returning the pcie_device object.
598 * This searches for the pcie_device from the target, then returns the pcie_device object.
600 static struct _pcie_device *
601 mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
602 struct MPT3SAS_TARGET *tgt_priv)
604 struct _pcie_device *ret;
605 unsigned long flags;
607 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
608 ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
609 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
611 return ret;
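/*
 * Locking/refcount note (added commentary): the double-underscore lookup
 * helpers assume the caller already holds the corresponding device lock
 * (checked with assert_spin_locked()), while the wrappers without the
 * prefix take and release the lock themselves. Both variants return the
 * object with an extra reference held, so callers are expected to drop it
 * with sas_device_put() or pcie_device_put() when they are done.
 */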
614 struct _sas_device *
615 __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
616 u64 sas_address)
618 struct _sas_device *sas_device;
620 assert_spin_locked(&ioc->sas_device_lock);
622 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
623 if (sas_device->sas_address == sas_address)
624 goto found_device;
626 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
627 if (sas_device->sas_address == sas_address)
628 goto found_device;
630 return NULL;
632 found_device:
633 sas_device_get(sas_device);
634 return sas_device;
638 * mpt3sas_get_sdev_by_addr - sas device search
639 * @ioc: per adapter object
640 * @sas_address: sas address
641 * Context: Calling function should acquire ioc->sas_device_lock
643 * This searches for sas_device based on sas_address, then returns the sas_device
644 * object.
646 struct _sas_device *
647 mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
648 u64 sas_address)
650 struct _sas_device *sas_device;
651 unsigned long flags;
653 spin_lock_irqsave(&ioc->sas_device_lock, flags);
654 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
655 sas_address);
656 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
658 return sas_device;
661 static struct _sas_device *
662 __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
664 struct _sas_device *sas_device;
666 assert_spin_locked(&ioc->sas_device_lock);
668 list_for_each_entry(sas_device, &ioc->sas_device_list, list)
669 if (sas_device->handle == handle)
670 goto found_device;
672 list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
673 if (sas_device->handle == handle)
674 goto found_device;
676 return NULL;
678 found_device:
679 sas_device_get(sas_device);
680 return sas_device;
684 * mpt3sas_get_sdev_by_handle - sas device search
685 * @ioc: per adapter object
686 * @handle: sas device handle (assigned by firmware)
687 * Context: Calling function should acquire ioc->sas_device_lock
689 * This searches for sas_device based on handle, then returns the sas_device
690 * object.
692 struct _sas_device *
693 mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
695 struct _sas_device *sas_device;
696 unsigned long flags;
698 spin_lock_irqsave(&ioc->sas_device_lock, flags);
699 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
700 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
702 return sas_device;
706 * _scsih_display_enclosure_chassis_info - display device location info
707 * @ioc: per adapter object
708 * @sas_device: per sas device object
709 * @sdev: scsi device struct
710 * @starget: scsi target struct
712 static void
713 _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
714 struct _sas_device *sas_device, struct scsi_device *sdev,
715 struct scsi_target *starget)
717 if (sdev) {
718 if (sas_device->enclosure_handle != 0)
719 sdev_printk(KERN_INFO, sdev,
720 "enclosure logical id (0x%016llx), slot(%d) \n",
721 (unsigned long long)
722 sas_device->enclosure_logical_id,
723 sas_device->slot);
724 if (sas_device->connector_name[0] != '\0')
725 sdev_printk(KERN_INFO, sdev,
726 "enclosure level(0x%04x), connector name( %s)\n",
727 sas_device->enclosure_level,
728 sas_device->connector_name);
729 if (sas_device->is_chassis_slot_valid)
730 sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
731 sas_device->chassis_slot);
732 } else if (starget) {
733 if (sas_device->enclosure_handle != 0)
734 starget_printk(KERN_INFO, starget,
735 "enclosure logical id(0x%016llx), slot(%d) \n",
736 (unsigned long long)
737 sas_device->enclosure_logical_id,
738 sas_device->slot);
739 if (sas_device->connector_name[0] != '\0')
740 starget_printk(KERN_INFO, starget,
741 "enclosure level(0x%04x), connector name( %s)\n",
742 sas_device->enclosure_level,
743 sas_device->connector_name);
744 if (sas_device->is_chassis_slot_valid)
745 starget_printk(KERN_INFO, starget,
746 "chassis slot(0x%04x)\n",
747 sas_device->chassis_slot);
748 } else {
749 if (sas_device->enclosure_handle != 0)
750 ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
751 (u64)sas_device->enclosure_logical_id,
752 sas_device->slot);
753 if (sas_device->connector_name[0] != '\0')
754 ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
755 sas_device->enclosure_level,
756 sas_device->connector_name);
757 if (sas_device->is_chassis_slot_valid)
758 ioc_info(ioc, "chassis slot(0x%04x)\n",
759 sas_device->chassis_slot);
764 * _scsih_sas_device_remove - remove sas_device from list.
765 * @ioc: per adapter object
766 * @sas_device: the sas_device object
767 * Context: This function will acquire ioc->sas_device_lock.
769 * If sas_device is on the list, remove it and decrement its reference count.
771 static void
772 _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
773 struct _sas_device *sas_device)
775 unsigned long flags;
777 if (!sas_device)
778 return;
779 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
780 sas_device->handle, (u64)sas_device->sas_address);
782 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
785 * The lock serializes access to the list, but we still need to verify
786 * that nobody removed the entry while we were waiting on the lock.
788 spin_lock_irqsave(&ioc->sas_device_lock, flags);
789 if (!list_empty(&sas_device->list)) {
790 list_del_init(&sas_device->list);
791 sas_device_put(sas_device);
793 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
797 * _scsih_device_remove_by_handle - removing device object by handle
798 * @ioc: per adapter object
799 * @handle: device handle
801 static void
802 _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
804 struct _sas_device *sas_device;
805 unsigned long flags;
807 if (ioc->shost_recovery)
808 return;
810 spin_lock_irqsave(&ioc->sas_device_lock, flags);
811 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
812 if (sas_device) {
813 list_del_init(&sas_device->list);
814 sas_device_put(sas_device);
816 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
817 if (sas_device) {
818 _scsih_remove_device(ioc, sas_device);
819 sas_device_put(sas_device);
824 * mpt3sas_device_remove_by_sas_address - removing device object by sas address
825 * @ioc: per adapter object
826 * @sas_address: device sas_address
828 void
829 mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
830 u64 sas_address)
832 struct _sas_device *sas_device;
833 unsigned long flags;
835 if (ioc->shost_recovery)
836 return;
838 spin_lock_irqsave(&ioc->sas_device_lock, flags);
839 sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
840 if (sas_device) {
841 list_del_init(&sas_device->list);
842 sas_device_put(sas_device);
844 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
845 if (sas_device) {
846 _scsih_remove_device(ioc, sas_device);
847 sas_device_put(sas_device);
852 * _scsih_sas_device_add - insert sas_device to the list.
853 * @ioc: per adapter object
854 * @sas_device: the sas_device object
855 * Context: This function will acquire ioc->sas_device_lock.
857 * Adding new object to the ioc->sas_device_list.
859 static void
860 _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
861 struct _sas_device *sas_device)
863 unsigned long flags;
865 dewtprintk(ioc,
866 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
867 __func__, sas_device->handle,
868 (u64)sas_device->sas_address));
870 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
871 NULL, NULL));
873 spin_lock_irqsave(&ioc->sas_device_lock, flags);
874 sas_device_get(sas_device);
875 list_add_tail(&sas_device->list, &ioc->sas_device_list);
876 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
878 if (ioc->hide_drives) {
879 clear_bit(sas_device->handle, ioc->pend_os_device_add);
880 return;
883 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
884 sas_device->sas_address_parent)) {
885 _scsih_sas_device_remove(ioc, sas_device);
886 } else if (!sas_device->starget) {
888 * When async scanning is enabled, it's not possible to remove
889 * devices while scanning is turned on due to an oops in
890 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
892 if (!ioc->is_driver_loading) {
893 mpt3sas_transport_port_remove(ioc,
894 sas_device->sas_address,
895 sas_device->sas_address_parent);
896 _scsih_sas_device_remove(ioc, sas_device);
898 } else
899 clear_bit(sas_device->handle, ioc->pend_os_device_add);
903 * _scsih_sas_device_init_add - insert sas_device to the list.
904 * @ioc: per adapter object
905 * @sas_device: the sas_device object
906 * Context: This function will acquire ioc->sas_device_lock.
908 * Adding new object at driver load time to the ioc->sas_device_init_list.
910 static void
911 _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
912 struct _sas_device *sas_device)
914 unsigned long flags;
916 dewtprintk(ioc,
917 ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
918 __func__, sas_device->handle,
919 (u64)sas_device->sas_address));
921 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
922 NULL, NULL));
924 spin_lock_irqsave(&ioc->sas_device_lock, flags);
925 sas_device_get(sas_device);
926 list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
927 _scsih_determine_boot_device(ioc, sas_device, 0);
928 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
932 static struct _pcie_device *
933 __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
935 struct _pcie_device *pcie_device;
937 assert_spin_locked(&ioc->pcie_device_lock);
939 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
940 if (pcie_device->wwid == wwid)
941 goto found_device;
943 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
944 if (pcie_device->wwid == wwid)
945 goto found_device;
947 return NULL;
949 found_device:
950 pcie_device_get(pcie_device);
951 return pcie_device;
956 * mpt3sas_get_pdev_by_wwid - pcie device search
957 * @ioc: per adapter object
958 * @wwid: wwid
960 * Context: This function will acquire ioc->pcie_device_lock and will release
961 * it before returning the pcie_device object.
963 * This searches for pcie_device based on wwid, then returns the pcie_device object.
965 static struct _pcie_device *
966 mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
968 struct _pcie_device *pcie_device;
969 unsigned long flags;
971 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
972 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
973 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
975 return pcie_device;
979 static struct _pcie_device *
980 __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
981 int channel)
983 struct _pcie_device *pcie_device;
985 assert_spin_locked(&ioc->pcie_device_lock);
987 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
988 if (pcie_device->id == id && pcie_device->channel == channel)
989 goto found_device;
991 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
992 if (pcie_device->id == id && pcie_device->channel == channel)
993 goto found_device;
995 return NULL;
997 found_device:
998 pcie_device_get(pcie_device);
999 return pcie_device;
1002 static struct _pcie_device *
1003 __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1005 struct _pcie_device *pcie_device;
1007 assert_spin_locked(&ioc->pcie_device_lock);
1009 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
1010 if (pcie_device->handle == handle)
1011 goto found_device;
1013 list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
1014 if (pcie_device->handle == handle)
1015 goto found_device;
1017 return NULL;
1019 found_device:
1020 pcie_device_get(pcie_device);
1021 return pcie_device;
1026 * mpt3sas_get_pdev_by_handle - pcie device search
1027 * @ioc: per adapter object
1028 * @handle: Firmware device handle
1030 * Context: This function will acquire ioc->pcie_device_lock and will release
1031 * it before returning the pcie_device object.
1033 * This searches for pcie_device based on handle, then returns the pcie_device
1034 * object.
1036 struct _pcie_device *
1037 mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1039 struct _pcie_device *pcie_device;
1040 unsigned long flags;
1042 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1043 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1044 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1046 return pcie_device;
1050 * _scsih_pcie_device_remove - remove pcie_device from list.
1051 * @ioc: per adapter object
1052 * @pcie_device: the pcie_device object
1053 * Context: This function will acquire ioc->pcie_device_lock.
1055 * If pcie_device is on the list, remove it and decrement its reference count.
1057 static void
1058 _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
1059 struct _pcie_device *pcie_device)
1061 unsigned long flags;
1062 int was_on_pcie_device_list = 0;
1064 if (!pcie_device)
1065 return;
1066 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
1067 pcie_device->handle, (u64)pcie_device->wwid);
1068 if (pcie_device->enclosure_handle != 0)
1069 ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
1070 (u64)pcie_device->enclosure_logical_id,
1071 pcie_device->slot);
1072 if (pcie_device->connector_name[0] != '\0')
1073 ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
1074 pcie_device->enclosure_level,
1075 pcie_device->connector_name);
1077 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1078 if (!list_empty(&pcie_device->list)) {
1079 list_del_init(&pcie_device->list);
1080 was_on_pcie_device_list = 1;
1082 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1083 if (was_on_pcie_device_list) {
1084 kfree(pcie_device->serial_number);
1085 pcie_device_put(pcie_device);
1091 * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
1092 * @ioc: per adapter object
1093 * @handle: device handle
1095 static void
1096 _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1098 struct _pcie_device *pcie_device;
1099 unsigned long flags;
1100 int was_on_pcie_device_list = 0;
1102 if (ioc->shost_recovery)
1103 return;
1105 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1106 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
1107 if (pcie_device) {
1108 if (!list_empty(&pcie_device->list)) {
1109 list_del_init(&pcie_device->list);
1110 was_on_pcie_device_list = 1;
1111 pcie_device_put(pcie_device);
1114 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1115 if (was_on_pcie_device_list) {
1116 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
1117 pcie_device_put(pcie_device);
1122 * _scsih_pcie_device_add - add pcie_device object
1123 * @ioc: per adapter object
1124 * @pcie_device: pcie_device object
1126 * This is added to the pcie_device_list linked list.
1128 static void
1129 _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
1130 struct _pcie_device *pcie_device)
1132 unsigned long flags;
1134 dewtprintk(ioc,
1135 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1136 __func__,
1137 pcie_device->handle, (u64)pcie_device->wwid));
1138 if (pcie_device->enclosure_handle != 0)
1139 dewtprintk(ioc,
1140 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1141 __func__,
1142 (u64)pcie_device->enclosure_logical_id,
1143 pcie_device->slot));
1144 if (pcie_device->connector_name[0] != '\0')
1145 dewtprintk(ioc,
1146 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1147 __func__, pcie_device->enclosure_level,
1148 pcie_device->connector_name));
1150 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1151 pcie_device_get(pcie_device);
1152 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
1153 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1155 if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
1156 _scsih_pcie_device_remove(ioc, pcie_device);
1157 } else if (!pcie_device->starget) {
1158 if (!ioc->is_driver_loading) {
1159 /*TODO-- Need to find out whether this condition will occur or not*/
1160 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1162 } else
1163 clear_bit(pcie_device->handle, ioc->pend_os_device_add);
1167 * _scsih_pcie_device_init_add - insert pcie_device to the init list.
1168 * @ioc: per adapter object
1169 * @pcie_device: the pcie_device object
1170 * Context: This function will acquire ioc->pcie_device_lock.
1172 * Adding new object at driver load time to the ioc->pcie_device_init_list.
1174 static void
1175 _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
1176 struct _pcie_device *pcie_device)
1178 unsigned long flags;
1180 dewtprintk(ioc,
1181 ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
1182 __func__,
1183 pcie_device->handle, (u64)pcie_device->wwid));
1184 if (pcie_device->enclosure_handle != 0)
1185 dewtprintk(ioc,
1186 ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
1187 __func__,
1188 (u64)pcie_device->enclosure_logical_id,
1189 pcie_device->slot));
1190 if (pcie_device->connector_name[0] != '\0')
1191 dewtprintk(ioc,
1192 ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
1193 __func__, pcie_device->enclosure_level,
1194 pcie_device->connector_name));
1196 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1197 pcie_device_get(pcie_device);
1198 list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
1199 _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
1200 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1203 * _scsih_raid_device_find_by_id - raid device search
1204 * @ioc: per adapter object
1205 * @id: sas device target id
1206 * @channel: sas device channel
1207 * Context: Calling function should acquire ioc->raid_device_lock
1209 * This searches for raid_device based on target id, then returns the raid_device
1210 * object.
1212 static struct _raid_device *
1213 _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
1215 struct _raid_device *raid_device, *r;
1217 r = NULL;
1218 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1219 if (raid_device->id == id && raid_device->channel == channel) {
1220 r = raid_device;
1221 goto out;
1225 out:
1226 return r;
1230 * mpt3sas_raid_device_find_by_handle - raid device search
1231 * @ioc: per adapter object
1232 * @handle: sas device handle (assigned by firmware)
1233 * Context: Calling function should acquire ioc->raid_device_lock
1235 * This searches for raid_device based on handle, then returns the raid_device
1236 * object.
1238 struct _raid_device *
1239 mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1241 struct _raid_device *raid_device, *r;
1243 r = NULL;
1244 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1245 if (raid_device->handle != handle)
1246 continue;
1247 r = raid_device;
1248 goto out;
1251 out:
1252 return r;
1256 * _scsih_raid_device_find_by_wwid - raid device search
1257 * @ioc: per adapter object
1258 * @wwid: ?
1259 * Context: Calling function should acquire ioc->raid_device_lock
1261 * This searches for raid_device based on wwid, then returns the raid_device
1262 * object.
1264 static struct _raid_device *
1265 _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
1267 struct _raid_device *raid_device, *r;
1269 r = NULL;
1270 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
1271 if (raid_device->wwid != wwid)
1272 continue;
1273 r = raid_device;
1274 goto out;
1277 out:
1278 return r;
1282 * _scsih_raid_device_add - add raid_device object
1283 * @ioc: per adapter object
1284 * @raid_device: raid_device object
1286 * This is added to the raid_device_list linked list.
1288 static void
1289 _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
1290 struct _raid_device *raid_device)
1292 unsigned long flags;
1294 dewtprintk(ioc,
1295 ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
1296 __func__,
1297 raid_device->handle, (u64)raid_device->wwid));
1299 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1300 list_add_tail(&raid_device->list, &ioc->raid_device_list);
1301 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1305 * _scsih_raid_device_remove - delete raid_device object
1306 * @ioc: per adapter object
1307 * @raid_device: raid_device object
1310 static void
1311 _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
1312 struct _raid_device *raid_device)
1314 unsigned long flags;
1316 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1317 list_del(&raid_device->list);
1318 kfree(raid_device);
1319 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1323 * mpt3sas_scsih_expander_find_by_handle - expander device search
1324 * @ioc: per adapter object
1325 * @handle: expander handle (assigned by firmware)
1326 * Context: Calling function should acquire ioc->sas_device_lock
1328 * This searches for expander device based on handle, then returns the
1329 * sas_node object.
1331 struct _sas_node *
1332 mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1334 struct _sas_node *sas_expander, *r;
1336 r = NULL;
1337 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1338 if (sas_expander->handle != handle)
1339 continue;
1340 r = sas_expander;
1341 goto out;
1343 out:
1344 return r;
1348 * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
1349 * @ioc: per adapter object
1350 * @handle: enclosure handle (assigned by firmware)
1351 * Context: Calling function should acquire ioc->sas_device_lock
1353 * This searches for enclosure device based on handle, then returns the
1354 * enclosure object.
1356 static struct _enclosure_node *
1357 mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
1359 struct _enclosure_node *enclosure_dev, *r;
1361 r = NULL;
1362 list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
1363 if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
1364 continue;
1365 r = enclosure_dev;
1366 goto out;
1368 out:
1369 return r;
1372 * mpt3sas_scsih_expander_find_by_sas_address - expander device search
1373 * @ioc: per adapter object
1374 * @sas_address: sas address
1375 * Context: Calling function should acquire ioc->sas_node_lock.
1377 * This searches for expander device based on sas_address, then returns the
1378 * sas_node object.
1380 struct _sas_node *
1381 mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
1382 u64 sas_address)
1384 struct _sas_node *sas_expander, *r;
1386 r = NULL;
1387 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
1388 if (sas_expander->sas_address != sas_address)
1389 continue;
1390 r = sas_expander;
1391 goto out;
1393 out:
1394 return r;
1398 * _scsih_expander_node_add - insert expander device to the list.
1399 * @ioc: per adapter object
1400 * @sas_expander: the sas_device object
1401 * Context: This function will acquire ioc->sas_node_lock.
1403 * Adding new object to the ioc->sas_expander_list.
1405 static void
1406 _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
1407 struct _sas_node *sas_expander)
1409 unsigned long flags;
1411 spin_lock_irqsave(&ioc->sas_node_lock, flags);
1412 list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
1413 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
1417 * _scsih_is_end_device - determines if device is an end device
1418 * @device_info: bitfield providing information about the device.
1419 * Context: none
1421 * Return: 1 if end device.
1423 static int
1424 _scsih_is_end_device(u32 device_info)
1426 if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
1427 ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
1428 (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
1429 (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
1430 return 1;
1431 else
1432 return 0;
1436 * _scsih_is_nvme_device - determines if device is an nvme device
1437 * @device_info: bitfield providing information about the device.
1438 * Context: none
1440 * Return: 1 if nvme device.
1442 static int
1443 _scsih_is_nvme_device(u32 device_info)
1445 if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
1446 == MPI26_PCIE_DEVINFO_NVME)
1447 return 1;
1448 else
1449 return 0;
1453 * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
1454 * @ioc: per adapter object
1455 * @smid: system request message index
1457 * Return: the scmd pointer stored for this smid, or NULL when the smid is
1458 * not outstanding at the driver level.
1460 struct scsi_cmnd *
1461 mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1463 struct scsi_cmnd *scmd = NULL;
1464 struct scsiio_tracker *st;
1465 Mpi25SCSIIORequest_t *mpi_request;
1467 if (smid > 0 &&
1468 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1469 u32 unique_tag = smid - 1;
1471 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1474 * If SCSI IO request is outstanding at driver level then
1475 * DevHandle field must be non-zero. If DevHandle is zero
1476 * then it means that this smid is free at driver level,
1477 * so return NULL.
1479 if (!mpi_request->DevHandle)
1480 return scmd;
1482 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1483 if (scmd) {
1484 st = scsi_cmd_priv(scmd);
1485 if (st->cb_idx == 0xFF || st->smid == 0)
1486 scmd = NULL;
1489 return scmd;
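/*
 * Note (added commentary): driver smids are 1-based while block layer tags
 * are 0-based, hence the "smid - 1" translation before scsi_host_find_tag().
 * A zeroed DevHandle in the message frame marks a smid that is free at the
 * driver level, so such entries (and tracker entries with cb_idx == 0xFF or
 * smid == 0) are filtered out before the scmd is returned.
 */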
1493 * scsih_change_queue_depth - setting device queue depth
1494 * @sdev: scsi device struct
1495 * @qdepth: requested queue depth
1497 * Return: queue depth.
1499 static int
1500 scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
1502 struct Scsi_Host *shost = sdev->host;
1503 int max_depth;
1504 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1505 struct MPT3SAS_DEVICE *sas_device_priv_data;
1506 struct MPT3SAS_TARGET *sas_target_priv_data;
1507 struct _sas_device *sas_device;
1508 unsigned long flags;
1510 max_depth = shost->can_queue;
1512 /* limit max device queue for SATA to 32 */
1513 sas_device_priv_data = sdev->hostdata;
1514 if (!sas_device_priv_data)
1515 goto not_sata;
1516 sas_target_priv_data = sas_device_priv_data->sas_target;
1517 if (!sas_target_priv_data)
1518 goto not_sata;
1519 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
1520 goto not_sata;
1522 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1523 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1524 if (sas_device) {
1525 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
1526 max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
1528 sas_device_put(sas_device);
1530 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1532 not_sata:
1534 if (!sdev->tagged_supported)
1535 max_depth = 1;
1536 if (qdepth > max_depth)
1537 qdepth = max_depth;
1538 return scsi_change_queue_depth(sdev, qdepth);
1542 * scsih_target_alloc - target add routine
1543 * @starget: scsi target struct
1545 * Return: 0 if ok. Any other return is assumed to be an error and
1546 * the device is ignored.
1548 static int
1549 scsih_target_alloc(struct scsi_target *starget)
1551 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1552 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1553 struct MPT3SAS_TARGET *sas_target_priv_data;
1554 struct _sas_device *sas_device;
1555 struct _raid_device *raid_device;
1556 struct _pcie_device *pcie_device;
1557 unsigned long flags;
1558 struct sas_rphy *rphy;
1560 sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
1561 GFP_KERNEL);
1562 if (!sas_target_priv_data)
1563 return -ENOMEM;
1565 starget->hostdata = sas_target_priv_data;
1566 sas_target_priv_data->starget = starget;
1567 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
1569 /* RAID volumes */
1570 if (starget->channel == RAID_CHANNEL) {
1571 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1572 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1573 starget->channel);
1574 if (raid_device) {
1575 sas_target_priv_data->handle = raid_device->handle;
1576 sas_target_priv_data->sas_address = raid_device->wwid;
1577 sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
1578 if (ioc->is_warpdrive)
1579 sas_target_priv_data->raid_device = raid_device;
1580 raid_device->starget = starget;
1582 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1583 return 0;
1586 /* PCIe devices */
1587 if (starget->channel == PCIE_CHANNEL) {
1588 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1589 pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
1590 starget->channel);
1591 if (pcie_device) {
1592 sas_target_priv_data->handle = pcie_device->handle;
1593 sas_target_priv_data->sas_address = pcie_device->wwid;
1594 sas_target_priv_data->pcie_dev = pcie_device;
1595 pcie_device->starget = starget;
1596 pcie_device->id = starget->id;
1597 pcie_device->channel = starget->channel;
1598 sas_target_priv_data->flags |=
1599 MPT_TARGET_FLAGS_PCIE_DEVICE;
1600 if (pcie_device->fast_path)
1601 sas_target_priv_data->flags |=
1602 MPT_TARGET_FASTPATH_IO;
1604 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1605 return 0;
1608 /* sas/sata devices */
1609 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1610 rphy = dev_to_rphy(starget->dev.parent);
1611 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1612 rphy->identify.sas_address);
1614 if (sas_device) {
1615 sas_target_priv_data->handle = sas_device->handle;
1616 sas_target_priv_data->sas_address = sas_device->sas_address;
1617 sas_target_priv_data->sas_dev = sas_device;
1618 sas_device->starget = starget;
1619 sas_device->id = starget->id;
1620 sas_device->channel = starget->channel;
1621 if (test_bit(sas_device->handle, ioc->pd_handles))
1622 sas_target_priv_data->flags |=
1623 MPT_TARGET_FLAGS_RAID_COMPONENT;
1624 if (sas_device->fast_path)
1625 sas_target_priv_data->flags |=
1626 MPT_TARGET_FASTPATH_IO;
1628 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1630 return 0;
1634 * scsih_target_destroy - target destroy routine
1635 * @starget: scsi target struct
1637 static void
1638 scsih_target_destroy(struct scsi_target *starget)
1640 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
1641 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1642 struct MPT3SAS_TARGET *sas_target_priv_data;
1643 struct _sas_device *sas_device;
1644 struct _raid_device *raid_device;
1645 struct _pcie_device *pcie_device;
1646 unsigned long flags;
1648 sas_target_priv_data = starget->hostdata;
1649 if (!sas_target_priv_data)
1650 return;
1652 if (starget->channel == RAID_CHANNEL) {
1653 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1654 raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
1655 starget->channel);
1656 if (raid_device) {
1657 raid_device->starget = NULL;
1658 raid_device->sdev = NULL;
1660 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1661 goto out;
1664 if (starget->channel == PCIE_CHANNEL) {
1665 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1666 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1667 sas_target_priv_data);
1668 if (pcie_device && (pcie_device->starget == starget) &&
1669 (pcie_device->id == starget->id) &&
1670 (pcie_device->channel == starget->channel))
1671 pcie_device->starget = NULL;
1673 if (pcie_device) {
1675 * Corresponding get() is in scsih_target_alloc()
1677 sas_target_priv_data->pcie_dev = NULL;
1678 pcie_device_put(pcie_device);
1679 pcie_device_put(pcie_device);
1681 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1682 goto out;
1685 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1686 sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
1687 if (sas_device && (sas_device->starget == starget) &&
1688 (sas_device->id == starget->id) &&
1689 (sas_device->channel == starget->channel))
1690 sas_device->starget = NULL;
1692 if (sas_device) {
1694 * Corresponding get() is in scsih_target_alloc()
1696 sas_target_priv_data->sas_dev = NULL;
1697 sas_device_put(sas_device);
1699 sas_device_put(sas_device);
1701 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1703 out:
1704 kfree(sas_target_priv_data);
1705 starget->hostdata = NULL;
1709 * scsih_slave_alloc - device add routine
1710 * @sdev: scsi device struct
1712 * Return: 0 if ok. Any other return is assumed to be an error and
1713 * the device is ignored.
1715 static int
1716 scsih_slave_alloc(struct scsi_device *sdev)
1718 struct Scsi_Host *shost;
1719 struct MPT3SAS_ADAPTER *ioc;
1720 struct MPT3SAS_TARGET *sas_target_priv_data;
1721 struct MPT3SAS_DEVICE *sas_device_priv_data;
1722 struct scsi_target *starget;
1723 struct _raid_device *raid_device;
1724 struct _sas_device *sas_device;
1725 struct _pcie_device *pcie_device;
1726 unsigned long flags;
1728 sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
1729 GFP_KERNEL);
1730 if (!sas_device_priv_data)
1731 return -ENOMEM;
1733 sas_device_priv_data->lun = sdev->lun;
1734 sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
1736 starget = scsi_target(sdev);
1737 sas_target_priv_data = starget->hostdata;
1738 sas_target_priv_data->num_luns++;
1739 sas_device_priv_data->sas_target = sas_target_priv_data;
1740 sdev->hostdata = sas_device_priv_data;
1741 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
1742 sdev->no_uld_attach = 1;
1744 shost = dev_to_shost(&starget->dev);
1745 ioc = shost_priv(shost);
1746 if (starget->channel == RAID_CHANNEL) {
1747 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1748 raid_device = _scsih_raid_device_find_by_id(ioc,
1749 starget->id, starget->channel);
1750 if (raid_device)
1751 raid_device->sdev = sdev; /* raid is single lun */
1752 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1754 if (starget->channel == PCIE_CHANNEL) {
1755 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1756 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
1757 sas_target_priv_data->sas_address);
1758 if (pcie_device && (pcie_device->starget == NULL)) {
1759 sdev_printk(KERN_INFO, sdev,
1760 "%s : pcie_device->starget set to starget @ %d\n",
1761 __func__, __LINE__);
1762 pcie_device->starget = starget;
1765 if (pcie_device)
1766 pcie_device_put(pcie_device);
1767 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1769 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1770 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1771 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
1772 sas_target_priv_data->sas_address);
1773 if (sas_device && (sas_device->starget == NULL)) {
1774 sdev_printk(KERN_INFO, sdev,
1775 "%s : sas_device->starget set to starget @ %d\n",
1776 __func__, __LINE__);
1777 sas_device->starget = starget;
1780 if (sas_device)
1781 sas_device_put(sas_device);
1783 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1786 return 0;
1790 * scsih_slave_destroy - device destroy routine
1791 * @sdev: scsi device struct
1793 static void
1794 scsih_slave_destroy(struct scsi_device *sdev)
1796 struct MPT3SAS_TARGET *sas_target_priv_data;
1797 struct scsi_target *starget;
1798 struct Scsi_Host *shost;
1799 struct MPT3SAS_ADAPTER *ioc;
1800 struct _sas_device *sas_device;
1801 struct _pcie_device *pcie_device;
1802 unsigned long flags;
1804 if (!sdev->hostdata)
1805 return;
1807 starget = scsi_target(sdev);
1808 sas_target_priv_data = starget->hostdata;
1809 sas_target_priv_data->num_luns--;
1811 shost = dev_to_shost(&starget->dev);
1812 ioc = shost_priv(shost);
1814 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
1815 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
1816 pcie_device = __mpt3sas_get_pdev_from_target(ioc,
1817 sas_target_priv_data);
1818 if (pcie_device && !sas_target_priv_data->num_luns)
1819 pcie_device->starget = NULL;
1821 if (pcie_device)
1822 pcie_device_put(pcie_device);
1824 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
1826 } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
1827 spin_lock_irqsave(&ioc->sas_device_lock, flags);
1828 sas_device = __mpt3sas_get_sdev_from_target(ioc,
1829 sas_target_priv_data);
1830 if (sas_device && !sas_target_priv_data->num_luns)
1831 sas_device->starget = NULL;
1833 if (sas_device)
1834 sas_device_put(sas_device);
1835 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
1838 kfree(sdev->hostdata);
1839 sdev->hostdata = NULL;
1843 * _scsih_display_sata_capabilities - sata capabilities
1844 * @ioc: per adapter object
1845 * @handle: device handle
1846 * @sdev: scsi device struct
1848 static void
1849 _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
1850 u16 handle, struct scsi_device *sdev)
1852 Mpi2ConfigReply_t mpi_reply;
1853 Mpi2SasDevicePage0_t sas_device_pg0;
1854 u32 ioc_status;
1855 u16 flags;
1856 u32 device_info;
1858 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
1859 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
1860 ioc_err(ioc, "failure at %s:%d/%s()!\n",
1861 __FILE__, __LINE__, __func__);
1862 return;
1865 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
1866 MPI2_IOCSTATUS_MASK;
1867 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
1868 ioc_err(ioc, "failure at %s:%d/%s()!\n",
1869 __FILE__, __LINE__, __func__);
1870 return;
1873 flags = le16_to_cpu(sas_device_pg0.Flags);
1874 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
1876 sdev_printk(KERN_INFO, sdev,
1877 "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
1878 "sw_preserve(%s)\n",
1879 (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
1880 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
1881 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
1882 "n",
1883 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
1884 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
1885 (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
1889 * raid transport support -
1890 * Enabled for SLES11 and newer; in older kernels the driver will panic when
1891 * the driver is unloaded and then reloaded - I believe that the subroutine
1892 * raid_class_release() is not cleaning up properly.
1896 * scsih_is_raid - return boolean indicating device is raid volume
1897 * @dev: the device struct object
1899 static int
1900 scsih_is_raid(struct device *dev)
1902 struct scsi_device *sdev = to_scsi_device(dev);
1903 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1905 if (ioc->is_warpdrive)
1906 return 0;
1907 return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
1910 static int
1911 scsih_is_nvme(struct device *dev)
1913 struct scsi_device *sdev = to_scsi_device(dev);
1915 return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
1919 * scsih_get_resync - get raid volume resync percent complete
1920 * @dev: the device struct object
1922 static void
1923 scsih_get_resync(struct device *dev)
1925 struct scsi_device *sdev = to_scsi_device(dev);
1926 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1927 static struct _raid_device *raid_device;
1928 unsigned long flags;
1929 Mpi2RaidVolPage0_t vol_pg0;
1930 Mpi2ConfigReply_t mpi_reply;
1931 u32 volume_status_flags;
1932 u8 percent_complete;
1933 u16 handle;
1935 percent_complete = 0;
1936 handle = 0;
1937 if (ioc->is_warpdrive)
1938 goto out;
1940 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1941 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1942 sdev->channel);
1943 if (raid_device) {
1944 handle = raid_device->handle;
1945 percent_complete = raid_device->percent_complete;
1947 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
1949 if (!handle)
1950 goto out;
1952 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
1953 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
1954 sizeof(Mpi2RaidVolPage0_t))) {
1955 ioc_err(ioc, "failure at %s:%d/%s()!\n",
1956 __FILE__, __LINE__, __func__);
1957 percent_complete = 0;
1958 goto out;
1961 volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
1962 if (!(volume_status_flags &
1963 MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
1964 percent_complete = 0;
1966 out:
1968 switch (ioc->hba_mpi_version_belonged) {
1969 case MPI2_VERSION:
1970 raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
1971 break;
1972 case MPI25_VERSION:
1973 case MPI26_VERSION:
1974 raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
1975 break;
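/*
 * Editorial note (illustrative, not driver code): raid_set_resync() here,
 * and the raid_set_state()/raid_set_level() calls in the routines below,
 * only cache their values in the raid_class transport data attached to the
 * scsi device; the raid_class core then surfaces them as read-only sysfs
 * attributes of that device.
 */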
1980  * scsih_get_state - get raid volume state
1981 * @dev: the device struct object
1983 static void
1984 scsih_get_state(struct device *dev)
1986 struct scsi_device *sdev = to_scsi_device(dev);
1987 struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1988 static struct _raid_device *raid_device;
1989 unsigned long flags;
1990 Mpi2RaidVolPage0_t vol_pg0;
1991 Mpi2ConfigReply_t mpi_reply;
1992 u32 volstate;
1993 enum raid_state state = RAID_STATE_UNKNOWN;
1994 u16 handle = 0;
1996 spin_lock_irqsave(&ioc->raid_device_lock, flags);
1997 raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
1998 sdev->channel);
1999 if (raid_device)
2000 handle = raid_device->handle;
2001 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2003 if (!raid_device)
2004 goto out;
2006 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
2007 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
2008 sizeof(Mpi2RaidVolPage0_t))) {
2009 ioc_err(ioc, "failure at %s:%d/%s()!\n",
2010 __FILE__, __LINE__, __func__);
2011 goto out;
2014 volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
2015 if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
2016 state = RAID_STATE_RESYNCING;
2017 goto out;
2020 switch (vol_pg0.VolumeState) {
2021 case MPI2_RAID_VOL_STATE_OPTIMAL:
2022 case MPI2_RAID_VOL_STATE_ONLINE:
2023 state = RAID_STATE_ACTIVE;
2024 break;
2025 case MPI2_RAID_VOL_STATE_DEGRADED:
2026 state = RAID_STATE_DEGRADED;
2027 break;
2028 case MPI2_RAID_VOL_STATE_FAILED:
2029 case MPI2_RAID_VOL_STATE_MISSING:
2030 state = RAID_STATE_OFFLINE;
2031 break;
2033 out:
2034 switch (ioc->hba_mpi_version_belonged) {
2035 case MPI2_VERSION:
2036 raid_set_state(mpt2sas_raid_template, dev, state);
2037 break;
2038 case MPI25_VERSION:
2039 case MPI26_VERSION:
2040 raid_set_state(mpt3sas_raid_template, dev, state);
2041 break;
2046 * _scsih_set_level - set raid level
2047  * @ioc: per adapter object
2048 * @sdev: scsi device struct
2049 * @volume_type: volume type
2051 static void
2052 _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
2053 struct scsi_device *sdev, u8 volume_type)
2055 enum raid_level level = RAID_LEVEL_UNKNOWN;
2057 switch (volume_type) {
2058 case MPI2_RAID_VOL_TYPE_RAID0:
2059 level = RAID_LEVEL_0;
2060 break;
2061 case MPI2_RAID_VOL_TYPE_RAID10:
2062 level = RAID_LEVEL_10;
2063 break;
2064 case MPI2_RAID_VOL_TYPE_RAID1E:
2065 level = RAID_LEVEL_1E;
2066 break;
2067 case MPI2_RAID_VOL_TYPE_RAID1:
2068 level = RAID_LEVEL_1;
2069 break;
2072 switch (ioc->hba_mpi_version_belonged) {
2073 case MPI2_VERSION:
2074 raid_set_level(mpt2sas_raid_template,
2075 &sdev->sdev_gendev, level);
2076 break;
2077 case MPI25_VERSION:
2078 case MPI26_VERSION:
2079 raid_set_level(mpt3sas_raid_template,
2080 &sdev->sdev_gendev, level);
2081 break;
2087 * _scsih_get_volume_capabilities - volume capabilities
2088 * @ioc: per adapter object
2089 * @raid_device: the raid_device object
2091 * Return: 0 for success, else 1
2093 static int
2094 _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
2095 struct _raid_device *raid_device)
2097 Mpi2RaidVolPage0_t *vol_pg0;
2098 Mpi2RaidPhysDiskPage0_t pd_pg0;
2099 Mpi2SasDevicePage0_t sas_device_pg0;
2100 Mpi2ConfigReply_t mpi_reply;
2101 u16 sz;
2102 u8 num_pds;
2104 if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
2105 &num_pds)) || !num_pds) {
2106 dfailprintk(ioc,
2107 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2108 __FILE__, __LINE__, __func__));
2109 return 1;
2112 raid_device->num_pds = num_pds;
2113 sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
2114 sizeof(Mpi2RaidVol0PhysDisk_t));
2115 vol_pg0 = kzalloc(sz, GFP_KERNEL);
2116 if (!vol_pg0) {
2117 dfailprintk(ioc,
2118 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2119 __FILE__, __LINE__, __func__));
2120 return 1;
2123 if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
2124 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
2125 dfailprintk(ioc,
2126 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2127 __FILE__, __LINE__, __func__));
2128 kfree(vol_pg0);
2129 return 1;
2132 raid_device->volume_type = vol_pg0->VolumeType;
2134 /* figure out what the underlying devices are by
2135 * obtaining the device_info bits for the 1st device
2137 if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
2138 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
2139 vol_pg0->PhysDisk[0].PhysDiskNum))) {
2140 if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
2141 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
2142 le16_to_cpu(pd_pg0.DevHandle)))) {
2143 raid_device->device_info =
2144 le32_to_cpu(sas_device_pg0.DeviceInfo);
2148 kfree(vol_pg0);
2149 return 0;
2153 * _scsih_enable_tlr - setting TLR flags
2154 * @ioc: per adapter object
2155 * @sdev: scsi device struct
2157 * Enabling Transaction Layer Retries for tape devices when
2158 * vpd page 0x90 is present
2161 static void
2162 _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
2165 /* only for TAPE */
2166 if (sdev->type != TYPE_TAPE)
2167 return;
2169 if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
2170 return;
2172 sas_enable_tlr(sdev);
2173 sdev_printk(KERN_INFO, sdev, "TLR %s\n",
2174 sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
2175 return;
2180 * scsih_slave_configure - device configure routine.
2181 * @sdev: scsi device struct
2183 * Return: 0 if ok. Any other return is assumed to be an error and
2184 * the device is ignored.
2186 static int
2187 scsih_slave_configure(struct scsi_device *sdev)
2189 struct Scsi_Host *shost = sdev->host;
2190 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
2191 struct MPT3SAS_DEVICE *sas_device_priv_data;
2192 struct MPT3SAS_TARGET *sas_target_priv_data;
2193 struct _sas_device *sas_device;
2194 struct _pcie_device *pcie_device;
2195 struct _raid_device *raid_device;
2196 unsigned long flags;
2197 int qdepth;
2198 u8 ssp_target = 0;
2199 char *ds = "";
2200 char *r_level = "";
2201 u16 handle, volume_handle = 0;
2202 u64 volume_wwid = 0;
2204 qdepth = 1;
2205 sas_device_priv_data = sdev->hostdata;
2206 sas_device_priv_data->configured_lun = 1;
2207 sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
2208 sas_target_priv_data = sas_device_priv_data->sas_target;
2209 handle = sas_target_priv_data->handle;
2211 /* raid volume handling */
2212 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
2214 spin_lock_irqsave(&ioc->raid_device_lock, flags);
2215 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
2216 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
2217 if (!raid_device) {
2218 dfailprintk(ioc,
2219 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2220 __FILE__, __LINE__, __func__));
2221 return 1;
2224 if (_scsih_get_volume_capabilities(ioc, raid_device)) {
2225 dfailprintk(ioc,
2226 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2227 __FILE__, __LINE__, __func__));
2228 return 1;
2232 * WARPDRIVE: Initialize the required data for Direct IO
2234 mpt3sas_init_warpdrive_properties(ioc, raid_device);
2236 /* RAID Queue Depth Support
2237 * IS volume = underlying qdepth of drive type, either
2238 * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
2239 * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
2241 if (raid_device->device_info &
2242 MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2243 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2244 ds = "SSP";
2245 } else {
2246 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2247 if (raid_device->device_info &
2248 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2249 ds = "SATA";
2250 else
2251 ds = "STP";
2254 switch (raid_device->volume_type) {
2255 case MPI2_RAID_VOL_TYPE_RAID0:
2256 r_level = "RAID0";
2257 break;
2258 case MPI2_RAID_VOL_TYPE_RAID1E:
2259 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2260 if (ioc->manu_pg10.OEMIdentifier &&
2261 (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
2262 MFG10_GF0_R10_DISPLAY) &&
2263 !(raid_device->num_pds % 2))
2264 r_level = "RAID10";
2265 else
2266 r_level = "RAID1E";
2267 break;
2268 case MPI2_RAID_VOL_TYPE_RAID1:
2269 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2270 r_level = "RAID1";
2271 break;
2272 case MPI2_RAID_VOL_TYPE_RAID10:
2273 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2274 r_level = "RAID10";
2275 break;
2276 case MPI2_RAID_VOL_TYPE_UNKNOWN:
2277 default:
2278 qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
2279 r_level = "RAIDX";
2280 break;
2283 if (!ioc->hide_ir_msg)
2284 sdev_printk(KERN_INFO, sdev,
2285 "%s: handle(0x%04x), wwid(0x%016llx),"
2286 " pd_count(%d), type(%s)\n",
2287 r_level, raid_device->handle,
2288 (unsigned long long)raid_device->wwid,
2289 raid_device->num_pds, ds);
2291 if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
2292 blk_queue_max_hw_sectors(sdev->request_queue,
2293 MPT3SAS_RAID_MAX_SECTORS);
2294 sdev_printk(KERN_INFO, sdev,
2295 "Set queue's max_sector to: %u\n",
2296 MPT3SAS_RAID_MAX_SECTORS);
2299 scsih_change_queue_depth(sdev, qdepth);
2301 /* raid transport support */
2302 if (!ioc->is_warpdrive)
2303 _scsih_set_level(ioc, sdev, raid_device->volume_type);
2304 return 0;
2307 /* non-raid handling */
2308 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
2309 if (mpt3sas_config_get_volume_handle(ioc, handle,
2310 &volume_handle)) {
2311 dfailprintk(ioc,
2312 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2313 __FILE__, __LINE__, __func__));
2314 return 1;
2316 if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
2317 volume_handle, &volume_wwid)) {
2318 dfailprintk(ioc,
2319 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2320 __FILE__, __LINE__, __func__));
2321 return 1;
2325 /* PCIe handling */
2326 if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2327 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2328 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
2329 sas_device_priv_data->sas_target->sas_address);
2330 if (!pcie_device) {
2331 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2332 dfailprintk(ioc,
2333 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2334 __FILE__, __LINE__, __func__));
2335 return 1;
2338 qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
2339 ds = "NVMe";
2340 sdev_printk(KERN_INFO, sdev,
2341 "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2342 ds, handle, (unsigned long long)pcie_device->wwid,
2343 pcie_device->port_num);
2344 if (pcie_device->enclosure_handle != 0)
2345 sdev_printk(KERN_INFO, sdev,
2346 "%s: enclosure logical id(0x%016llx), slot(%d)\n",
2348 (unsigned long long)pcie_device->enclosure_logical_id,
2349 pcie_device->slot);
2350 if (pcie_device->connector_name[0] != '\0')
2351 sdev_printk(KERN_INFO, sdev,
2352 "%s: enclosure level(0x%04x),"
2353 "connector name( %s)\n", ds,
2354 pcie_device->enclosure_level,
2355 pcie_device->connector_name);
2357 if (pcie_device->nvme_mdts)
2358 blk_queue_max_hw_sectors(sdev->request_queue,
2359 pcie_device->nvme_mdts/512);
2361 pcie_device_put(pcie_device);
2362 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2363 scsih_change_queue_depth(sdev, qdepth);
2364  /* Enable the QUEUE_FLAG_NOMERGES flag so that IOs won't be
2365  ** merged, which eliminates the holes created during a merge
2366  ** operation.
2368 blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
2369 sdev->request_queue);
2370 blk_queue_virt_boundary(sdev->request_queue,
2371 ioc->page_size - 1);
2372 return 0;
2375 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2376 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
2377 sas_device_priv_data->sas_target->sas_address);
2378 if (!sas_device) {
2379 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2380 dfailprintk(ioc,
2381 ioc_warn(ioc, "failure at %s:%d/%s()!\n",
2382 __FILE__, __LINE__, __func__));
2383 return 1;
2386 sas_device->volume_handle = volume_handle;
2387 sas_device->volume_wwid = volume_wwid;
2388 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
2389 qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
2390 ssp_target = 1;
2391 if (sas_device->device_info &
2392 MPI2_SAS_DEVICE_INFO_SEP) {
2393 sdev_printk(KERN_WARNING, sdev,
2394 "set ignore_delay_remove for handle(0x%04x)\n",
2395 sas_device_priv_data->sas_target->handle);
2396 sas_device_priv_data->ignore_delay_remove = 1;
2397 ds = "SES";
2398 } else
2399 ds = "SSP";
2400 } else {
2401 qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
2402 if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
2403 ds = "STP";
2404 else if (sas_device->device_info &
2405 MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
2406 ds = "SATA";
2409 sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
2410 "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
2411 ds, handle, (unsigned long long)sas_device->sas_address,
2412 sas_device->phy, (unsigned long long)sas_device->device_name);
2414 _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
2416 sas_device_put(sas_device);
2417 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2419 if (!ssp_target)
2420 _scsih_display_sata_capabilities(ioc, handle, sdev);
2423 scsih_change_queue_depth(sdev, qdepth);
2425 if (ssp_target) {
2426 sas_read_port_mode_page(sdev);
2427 _scsih_enable_tlr(ioc, sdev);
2430 return 0;
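/*
 * Editorial sketch for the PCIe (NVMe) branch of scsih_slave_configure()
 * above, assuming a hypothetical device reporting nvme_mdts = 131072 bytes
 * (128 KiB):
 *
 *   max_hw_sectors = nvme_mdts / 512 = 131072 / 512 = 256 sectors
 *
 * so blk_queue_max_hw_sectors() caps a single request at 128 KiB, while
 * blk_queue_virt_boundary(q, ioc->page_size - 1) enforces the
 * controller-page alignment that NVMe PRP lists require between
 * scatter/gather elements, and QUEUE_FLAG_NOMERGES avoids re-creating
 * such holes through request merging.
 */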
2434 * scsih_bios_param - fetch head, sector, cylinder info for a disk
2435 * @sdev: scsi device struct
2436 * @bdev: pointer to block device context
2437 * @capacity: device size (in 512 byte sectors)
2438 * @params: three element array to place output:
2439 * params[0] number of heads (max 255)
2440 * params[1] number of sectors (max 63)
2441 * params[2] number of cylinders
2443 static int
2444 scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2445 sector_t capacity, int params[])
2447 int heads;
2448 int sectors;
2449 sector_t cylinders;
2450 ulong dummy;
2452 heads = 64;
2453 sectors = 32;
2455 dummy = heads * sectors;
2456 cylinders = capacity;
2457 sector_div(cylinders, dummy);
2460  * Handle extended translation size for logical drives
2461  * > 1 GB
2463 if ((ulong)capacity >= 0x200000) {
2464 heads = 255;
2465 sectors = 63;
2466 dummy = heads * sectors;
2467 cylinders = capacity;
2468 sector_div(cylinders, dummy);
2471 /* return result */
2472 params[0] = heads;
2473 params[1] = sectors;
2474 params[2] = cylinders;
2476 return 0;
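/*
 * Editorial worked example (not driver code): for a 2 TiB disk,
 * capacity = 2 * 2^40 / 512 = 4294967296 sectors, which is >= 0x200000,
 * so the extended translation above is used:
 *
 *   heads = 255, sectors = 63
 *   cylinders = 4294967296 / (255 * 63) = 267349   (integer division)
 *
 * and params[] is returned as {255, 63, 267349}.
 */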
2480 * _scsih_response_code - translation of device response code
2481 * @ioc: per adapter object
2482 * @response_code: response code returned by the device
2484 static void
2485 _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
2487 char *desc;
2489 switch (response_code) {
2490 case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
2491 desc = "task management request completed";
2492 break;
2493 case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
2494 desc = "invalid frame";
2495 break;
2496 case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
2497 desc = "task management request not supported";
2498 break;
2499 case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
2500 desc = "task management request failed";
2501 break;
2502 case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
2503 desc = "task management request succeeded";
2504 break;
2505 case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
2506 desc = "invalid lun";
2507 break;
2508 case 0xA:
2509 desc = "overlapped tag attempted";
2510 break;
2511 case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
2512 desc = "task queued, however not sent to target";
2513 break;
2514 default:
2515 desc = "unknown";
2516 break;
2518 ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
2522 * _scsih_tm_done - tm completion routine
2523 * @ioc: per adapter object
2524 * @smid: system request message index
2525 * @msix_index: MSIX table index supplied by the OS
2526 * @reply: reply message frame(lower 32bit addr)
2527 * Context: none.
2529 * The callback handler when using scsih_issue_tm.
2531 * Return: 1 meaning mf should be freed from _base_interrupt
2532 * 0 means the mf is freed from this function.
2534 static u8
2535 _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
2537 MPI2DefaultReply_t *mpi_reply;
2539 if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
2540 return 1;
2541 if (ioc->tm_cmds.smid != smid)
2542 return 1;
2543 ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
2544 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
2545 if (mpi_reply) {
2546 memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
2547 ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
2549 ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
2550 complete(&ioc->tm_cmds.done);
2551 return 1;
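/*
 * Editorial note: tm_cmds follows the driver's internal-command pattern:
 * the issuer sets MPT3_CMD_PENDING and sleeps on tm_cmds.done; this
 * callback copies any reply frame, ORs in MPT3_CMD_COMPLETE (plus
 * MPT3_CMD_REPLY_VALID when a reply exists), clears the PENDING bit and
 * wakes the waiter via complete().
 */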
2555 * mpt3sas_scsih_set_tm_flag - set per target tm_busy
2556 * @ioc: per adapter object
2557 * @handle: device handle
2559  * During a task management request, we need to freeze the device queue.
2561 void
2562 mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2564 struct MPT3SAS_DEVICE *sas_device_priv_data;
2565 struct scsi_device *sdev;
2566 u8 skip = 0;
2568 shost_for_each_device(sdev, ioc->shost) {
2569 if (skip)
2570 continue;
2571 sas_device_priv_data = sdev->hostdata;
2572 if (!sas_device_priv_data)
2573 continue;
2574 if (sas_device_priv_data->sas_target->handle == handle) {
2575 sas_device_priv_data->sas_target->tm_busy = 1;
2576 skip = 1;
2577 ioc->ignore_loginfos = 1;
2583 * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
2584 * @ioc: per adapter object
2585 * @handle: device handle
2587  * Once the task management request completes, unfreeze the device queue.
2589 void
2590 mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
2592 struct MPT3SAS_DEVICE *sas_device_priv_data;
2593 struct scsi_device *sdev;
2594 u8 skip = 0;
2596 shost_for_each_device(sdev, ioc->shost) {
2597 if (skip)
2598 continue;
2599 sas_device_priv_data = sdev->hostdata;
2600 if (!sas_device_priv_data)
2601 continue;
2602 if (sas_device_priv_data->sas_target->handle == handle) {
2603 sas_device_priv_data->sas_target->tm_busy = 0;
2604 skip = 1;
2605 ioc->ignore_loginfos = 0;
2611 * mpt3sas_scsih_issue_tm - main routine for sending tm requests
2612 * @ioc: per adapter struct
2613 * @handle: device handle
2614 * @lun: lun number
2615 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
2616 * @smid_task: smid assigned to the task
2617 * @msix_task: MSIX table index supplied by the OS
2618 * @timeout: timeout in seconds
2619 * @tr_method: Target Reset Method
2620 * Context: user
2622 * A generic API for sending task management requests to firmware.
2624 * The callback index is set inside `ioc->tm_cb_idx`.
2625  * The caller is responsible for checking for outstanding commands.
2627 * Return: SUCCESS or FAILED.
2630 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
2631 u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
2633 Mpi2SCSITaskManagementRequest_t *mpi_request;
2634 Mpi2SCSITaskManagementReply_t *mpi_reply;
2635 u16 smid = 0;
2636 u32 ioc_state;
2637 int rc;
2639 lockdep_assert_held(&ioc->tm_cmds.mutex);
2641 if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2642 ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
2643 return FAILED;
2646 if (ioc->shost_recovery || ioc->remove_host ||
2647 ioc->pci_error_recovery) {
2648 ioc_info(ioc, "%s: host reset in progress!\n", __func__);
2649 return FAILED;
2652 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
2653 if (ioc_state & MPI2_DOORBELL_USED) {
2654 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
2655 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2656 return (!rc) ? SUCCESS : FAILED;
2659 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2660 mpt3sas_base_fault_info(ioc, ioc_state &
2661 MPI2_DOORBELL_DATA_MASK);
2662 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
2663 return (!rc) ? SUCCESS : FAILED;
2666 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
2667 if (!smid) {
2668 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
2669 return FAILED;
2672 dtmprintk(ioc,
2673 ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
2674 handle, type, smid_task, timeout, tr_method));
2675 ioc->tm_cmds.status = MPT3_CMD_PENDING;
2676 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2677 ioc->tm_cmds.smid = smid;
2678 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
2679 memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
2680 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
2681 mpi_request->DevHandle = cpu_to_le16(handle);
2682 mpi_request->TaskType = type;
2683 mpi_request->MsgFlags = tr_method;
2684 mpi_request->TaskMID = cpu_to_le16(smid_task);
2685 int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
2686 mpt3sas_scsih_set_tm_flag(ioc, handle);
2687 init_completion(&ioc->tm_cmds.done);
2688 mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
2689 wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
2690 if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
2691 if (mpt3sas_base_check_cmd_timeout(ioc,
2692 ioc->tm_cmds.status, mpi_request,
2693 sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
2694 rc = mpt3sas_base_hard_reset_handler(ioc,
2695 FORCE_BIG_HAMMER);
2696 rc = (!rc) ? SUCCESS : FAILED;
2697 goto out;
2701 /* sync IRQs in case those were busy during flush. */
2702 mpt3sas_base_sync_reply_irqs(ioc);
2704 if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
2705 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
2706 mpi_reply = ioc->tm_cmds.reply;
2707 dtmprintk(ioc,
2708 ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
2709 le16_to_cpu(mpi_reply->IOCStatus),
2710 le32_to_cpu(mpi_reply->IOCLogInfo),
2711 le32_to_cpu(mpi_reply->TerminationCount)));
2712 if (ioc->logging_level & MPT_DEBUG_TM) {
2713 _scsih_response_code(ioc, mpi_reply->ResponseCode);
2714 if (mpi_reply->IOCStatus)
2715 _debug_dump_mf(mpi_request,
2716 sizeof(Mpi2SCSITaskManagementRequest_t)/4);
2719 rc = SUCCESS;
2721 out:
2722 mpt3sas_scsih_clear_tm_flag(ioc, handle);
2723 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
2724 return rc;
2727 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
2728 u64 lun, u8 type, u16 smid_task, u16 msix_task,
2729 u8 timeout, u8 tr_method)
2731 int ret;
2733 mutex_lock(&ioc->tm_cmds.mutex);
2734 ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
2735 msix_task, timeout, tr_method);
2736 mutex_unlock(&ioc->tm_cmds.mutex);
2738 return ret;
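/*
 * Editorial sketch (illustrative only): a caller that does not already hold
 * ioc->tm_cmds.mutex uses the locked wrapper above, e.g. a LUN reset
 * against a plain SAS/SATA handle, mirroring scsih_dev_reset() below:
 *
 *   rc = mpt3sas_scsih_issue_locked_tm(ioc, handle, lun,
 *           MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET,
 *           0, 0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
 *
 * mpt3sas_scsih_issue_tm() itself asserts (lockdep_assert_held) that the
 * mutex is held, so it is only called directly from paths that already
 * own it.
 */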
2742 * _scsih_tm_display_info - displays info about the device
2743 * @ioc: per adapter struct
2744 * @scmd: pointer to scsi command object
2746 * Called by task management callback handlers.
2748 static void
2749 _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
2751 struct scsi_target *starget = scmd->device->sdev_target;
2752 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
2753 struct _sas_device *sas_device = NULL;
2754 struct _pcie_device *pcie_device = NULL;
2755 unsigned long flags;
2756 char *device_str = NULL;
2758 if (!priv_target)
2759 return;
2760 if (ioc->hide_ir_msg)
2761 device_str = "WarpDrive";
2762 else
2763 device_str = "volume";
2765 scsi_print_command(scmd);
2766 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2767 starget_printk(KERN_INFO, starget,
2768 "%s handle(0x%04x), %s wwid(0x%016llx)\n",
2769 device_str, priv_target->handle,
2770 device_str, (unsigned long long)priv_target->sas_address);
2772 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
2773 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
2774 pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
2775 if (pcie_device) {
2776 starget_printk(KERN_INFO, starget,
2777 "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
2778 pcie_device->handle,
2779 (unsigned long long)pcie_device->wwid,
2780 pcie_device->port_num);
2781 if (pcie_device->enclosure_handle != 0)
2782 starget_printk(KERN_INFO, starget,
2783 "enclosure logical id(0x%016llx), slot(%d)\n",
2784 (unsigned long long)
2785 pcie_device->enclosure_logical_id,
2786 pcie_device->slot);
2787 if (pcie_device->connector_name[0] != '\0')
2788 starget_printk(KERN_INFO, starget,
2789 "enclosure level(0x%04x), connector name( %s)\n",
2790 pcie_device->enclosure_level,
2791 pcie_device->connector_name);
2792 pcie_device_put(pcie_device);
2794 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2796 } else {
2797 spin_lock_irqsave(&ioc->sas_device_lock, flags);
2798 sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
2799 if (sas_device) {
2800 if (priv_target->flags &
2801 MPT_TARGET_FLAGS_RAID_COMPONENT) {
2802 starget_printk(KERN_INFO, starget,
2803 "volume handle(0x%04x), "
2804 "volume wwid(0x%016llx)\n",
2805 sas_device->volume_handle,
2806 (unsigned long long)sas_device->volume_wwid);
2808 starget_printk(KERN_INFO, starget,
2809 "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
2810 sas_device->handle,
2811 (unsigned long long)sas_device->sas_address,
2812 sas_device->phy);
2814 _scsih_display_enclosure_chassis_info(NULL, sas_device,
2815 NULL, starget);
2817 sas_device_put(sas_device);
2819 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
2824  * scsih_abort - eh thread's main abort routine
2825 * @scmd: pointer to scsi command object
2827 * Return: SUCCESS if command aborted else FAILED
2829 static int
2830 scsih_abort(struct scsi_cmnd *scmd)
2832 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2833 struct MPT3SAS_DEVICE *sas_device_priv_data;
2834 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
2835 u16 handle;
2836 int r;
2838 u8 timeout = 30;
2839 struct _pcie_device *pcie_device = NULL;
2840 sdev_printk(KERN_INFO, scmd->device,
2841 "attempting task abort! scmd(%p)\n", scmd);
2842 _scsih_tm_display_info(ioc, scmd);
2844 sas_device_priv_data = scmd->device->hostdata;
2845 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2846 ioc->remove_host) {
2847 sdev_printk(KERN_INFO, scmd->device,
2848 "device been deleted! scmd(%p)\n", scmd);
2849 scmd->result = DID_NO_CONNECT << 16;
2850 scmd->scsi_done(scmd);
2851 r = SUCCESS;
2852 goto out;
2855 /* check for completed command */
2856 if (st == NULL || st->cb_idx == 0xFF) {
2857 scmd->result = DID_RESET << 16;
2858 r = SUCCESS;
2859 goto out;
2862 /* for hidden raid components and volumes this is not supported */
2863 if (sas_device_priv_data->sas_target->flags &
2864 MPT_TARGET_FLAGS_RAID_COMPONENT ||
2865 sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
2866 scmd->result = DID_RESET << 16;
2867 r = FAILED;
2868 goto out;
2871 mpt3sas_halt_firmware(ioc);
2873 handle = sas_device_priv_data->sas_target->handle;
2874 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2875 if (pcie_device && (!ioc->tm_custom_handling))
2876 timeout = ioc->nvme_abort_timeout;
2877 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2878 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
2879 st->smid, st->msix_io, timeout, 0);
2880 /* Command must be cleared after abort */
2881 if (r == SUCCESS && st->cb_idx != 0xFF)
2882 r = FAILED;
2883 out:
2884 sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
2885 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2886 if (pcie_device)
2887 pcie_device_put(pcie_device);
2888 return r;
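/*
 * Editorial note: scsih_abort() is the first rung of the SCSI error-handler
 * escalation for this driver; when it returns FAILED, the midlayer moves on
 * to scsih_dev_reset(), scsih_target_reset() and finally scsih_host_reset()
 * defined below.
 */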
2892  * scsih_dev_reset - eh thread's main device reset routine
2893  * @scmd: pointer to scsi command object
2895  * Return: SUCCESS if the device was reset else FAILED
2897 static int
2898 scsih_dev_reset(struct scsi_cmnd *scmd)
2900 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2901 struct MPT3SAS_DEVICE *sas_device_priv_data;
2902 struct _sas_device *sas_device = NULL;
2903 struct _pcie_device *pcie_device = NULL;
2904 u16 handle;
2905 u8 tr_method = 0;
2906 u8 tr_timeout = 30;
2907 int r;
2909 struct scsi_target *starget = scmd->device->sdev_target;
2910 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2912 sdev_printk(KERN_INFO, scmd->device,
2913 "attempting device reset! scmd(%p)\n", scmd);
2914 _scsih_tm_display_info(ioc, scmd);
2916 sas_device_priv_data = scmd->device->hostdata;
2917 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2918 ioc->remove_host) {
2919 sdev_printk(KERN_INFO, scmd->device,
2920 "device been deleted! scmd(%p)\n", scmd);
2921 scmd->result = DID_NO_CONNECT << 16;
2922 scmd->scsi_done(scmd);
2923 r = SUCCESS;
2924 goto out;
2927 /* for hidden raid components obtain the volume_handle */
2928 handle = 0;
2929 if (sas_device_priv_data->sas_target->flags &
2930 MPT_TARGET_FLAGS_RAID_COMPONENT) {
2931 sas_device = mpt3sas_get_sdev_from_target(ioc,
2932 target_priv_data);
2933 if (sas_device)
2934 handle = sas_device->volume_handle;
2935 } else
2936 handle = sas_device_priv_data->sas_target->handle;
2938 if (!handle) {
2939 scmd->result = DID_RESET << 16;
2940 r = FAILED;
2941 goto out;
2944 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
2946 if (pcie_device && (!ioc->tm_custom_handling)) {
2947 tr_timeout = pcie_device->reset_timeout;
2948 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
2949 } else
2950 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
2951 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
2952 MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
2953 tr_timeout, tr_method);
2954 /* Check for busy commands after reset */
2955 if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
2956 r = FAILED;
2957 out:
2958 sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
2959 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
2961 if (sas_device)
2962 sas_device_put(sas_device);
2963 if (pcie_device)
2964 pcie_device_put(pcie_device);
2966 return r;
2970  * scsih_target_reset - eh thread's main target reset routine
2971  * @scmd: pointer to scsi command object
2973  * Return: SUCCESS if the target was reset else FAILED
2975 static int
2976 scsih_target_reset(struct scsi_cmnd *scmd)
2978 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
2979 struct MPT3SAS_DEVICE *sas_device_priv_data;
2980 struct _sas_device *sas_device = NULL;
2981 struct _pcie_device *pcie_device = NULL;
2982 u16 handle;
2983 u8 tr_method = 0;
2984 u8 tr_timeout = 30;
2985 int r;
2986 struct scsi_target *starget = scmd->device->sdev_target;
2987 struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
2989 starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
2990 scmd);
2991 _scsih_tm_display_info(ioc, scmd);
2993 sas_device_priv_data = scmd->device->hostdata;
2994 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
2995 ioc->remove_host) {
2996 starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
2997 scmd);
2998 scmd->result = DID_NO_CONNECT << 16;
2999 scmd->scsi_done(scmd);
3000 r = SUCCESS;
3001 goto out;
3004 /* for hidden raid components obtain the volume_handle */
3005 handle = 0;
3006 if (sas_device_priv_data->sas_target->flags &
3007 MPT_TARGET_FLAGS_RAID_COMPONENT) {
3008 sas_device = mpt3sas_get_sdev_from_target(ioc,
3009 target_priv_data);
3010 if (sas_device)
3011 handle = sas_device->volume_handle;
3012 } else
3013 handle = sas_device_priv_data->sas_target->handle;
3015 if (!handle) {
3016 scmd->result = DID_RESET << 16;
3017 r = FAILED;
3018 goto out;
3021 pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
3023 if (pcie_device && (!ioc->tm_custom_handling)) {
3024 tr_timeout = pcie_device->reset_timeout;
3025 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3026 } else
3027 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3028 r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
3029 MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
3030 tr_timeout, tr_method);
3031 /* Check for busy commands after reset */
3032 if (r == SUCCESS && atomic_read(&starget->target_busy))
3033 r = FAILED;
3034 out:
3035 starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
3036 ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
3038 if (sas_device)
3039 sas_device_put(sas_device);
3040 if (pcie_device)
3041 pcie_device_put(pcie_device);
3042 return r;
3047  * scsih_host_reset - eh thread's main host reset routine
3048  * @scmd: pointer to scsi command object
3050  * Return: SUCCESS if the host was reset else FAILED
3052 static int
3053 scsih_host_reset(struct scsi_cmnd *scmd)
3055 struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
3056 int r, retval;
3058 ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
3059 scsi_print_command(scmd);
3061 if (ioc->is_driver_loading || ioc->remove_host) {
3062 ioc_info(ioc, "Blocking the host reset\n");
3063 r = FAILED;
3064 goto out;
3067 retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
3068 r = (retval < 0) ? FAILED : SUCCESS;
3069 out:
3070 ioc_info(ioc, "host reset: %s scmd(%p)\n",
3071 r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
3073 return r;
3077 * _scsih_fw_event_add - insert and queue up fw_event
3078 * @ioc: per adapter object
3079 * @fw_event: object describing the event
3080 * Context: This function will acquire ioc->fw_event_lock.
3082  * This adds the firmware event object to the link list, then queues it up
3083  * to be processed from user context.
3085 static void
3086 _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
3088 unsigned long flags;
3090 if (ioc->firmware_event_thread == NULL)
3091 return;
3093 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3094 fw_event_work_get(fw_event);
3095 INIT_LIST_HEAD(&fw_event->list);
3096 list_add_tail(&fw_event->list, &ioc->fw_event_list);
3097 INIT_WORK(&fw_event->work, _firmware_event_work);
3098 fw_event_work_get(fw_event);
3099 queue_work(ioc->firmware_event_thread, &fw_event->work);
3100 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3104 * _scsih_fw_event_del_from_list - delete fw_event from the list
3105 * @ioc: per adapter object
3106 * @fw_event: object describing the event
3107 * Context: This function will acquire ioc->fw_event_lock.
3109 * If the fw_event is on the fw_event_list, remove it and do a put.
3111 static void
3112 _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
3113 *fw_event)
3115 unsigned long flags;
3117 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3118 if (!list_empty(&fw_event->list)) {
3119 list_del_init(&fw_event->list);
3120 fw_event_work_put(fw_event);
3122 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3127 * mpt3sas_send_trigger_data_event - send event for processing trigger data
3128 * @ioc: per adapter object
3129 * @event_data: trigger event data
3131 void
3132 mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
3133 struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
3135 struct fw_event_work *fw_event;
3136 u16 sz;
3138 if (ioc->is_driver_loading)
3139 return;
3140 sz = sizeof(*event_data);
3141 fw_event = alloc_fw_event_work(sz);
3142 if (!fw_event)
3143 return;
3144 fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
3145 fw_event->ioc = ioc;
3146 memcpy(fw_event->event_data, event_data, sizeof(*event_data));
3147 _scsih_fw_event_add(ioc, fw_event);
3148 fw_event_work_put(fw_event);
3152 * _scsih_error_recovery_delete_devices - remove devices not responding
3153 * @ioc: per adapter object
3155 static void
3156 _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
3158 struct fw_event_work *fw_event;
3160 if (ioc->is_driver_loading)
3161 return;
3162 fw_event = alloc_fw_event_work(0);
3163 if (!fw_event)
3164 return;
3165 fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
3166 fw_event->ioc = ioc;
3167 _scsih_fw_event_add(ioc, fw_event);
3168 fw_event_work_put(fw_event);
3172 * mpt3sas_port_enable_complete - port enable completed (fake event)
3173 * @ioc: per adapter object
3175 void
3176 mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
3178 struct fw_event_work *fw_event;
3180 fw_event = alloc_fw_event_work(0);
3181 if (!fw_event)
3182 return;
3183 fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
3184 fw_event->ioc = ioc;
3185 _scsih_fw_event_add(ioc, fw_event);
3186 fw_event_work_put(fw_event);
3189 static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
3191 unsigned long flags;
3192 struct fw_event_work *fw_event = NULL;
3194 spin_lock_irqsave(&ioc->fw_event_lock, flags);
3195 if (!list_empty(&ioc->fw_event_list)) {
3196 fw_event = list_first_entry(&ioc->fw_event_list,
3197 struct fw_event_work, list);
3198 list_del_init(&fw_event->list);
3200 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
3202 return fw_event;
3206 * _scsih_fw_event_cleanup_queue - cleanup event queue
3207 * @ioc: per adapter object
3209 * Walk the firmware event queue, either killing timers, or waiting
3210 * for outstanding events to complete
3212 static void
3213 _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
3215 struct fw_event_work *fw_event;
3217 if (list_empty(&ioc->fw_event_list) ||
3218 !ioc->firmware_event_thread || in_interrupt())
3219 return;
3221 while ((fw_event = dequeue_next_fw_event(ioc))) {
3223 * Wait on the fw_event to complete. If this returns 1, then
3224 * the event was never executed, and we need a put for the
3225 * reference the work had on the fw_event.
3227 * If it did execute, we wait for it to finish, and the put will
3228 * happen from _firmware_event_work()
3230 if (cancel_work_sync(&fw_event->work))
3231 fw_event_work_put(fw_event);
3233 fw_event_work_put(fw_event);
3238 * _scsih_internal_device_block - block the sdev device
3239 * @sdev: per device object
3240 * @sas_device_priv_data : per device driver private data
3242  * Make sure the device is blocked without error; if not,
3243  * print an error.
3245 static void
3246 _scsih_internal_device_block(struct scsi_device *sdev,
3247 struct MPT3SAS_DEVICE *sas_device_priv_data)
3249 int r = 0;
3251 sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
3252 sas_device_priv_data->sas_target->handle);
3253 sas_device_priv_data->block = 1;
3255 r = scsi_internal_device_block_nowait(sdev);
3256 if (r == -EINVAL)
3257 sdev_printk(KERN_WARNING, sdev,
3258 "device_block failed with return(%d) for handle(0x%04x)\n",
3259 r, sas_device_priv_data->sas_target->handle);
3263 * _scsih_internal_device_unblock - unblock the sdev device
3264 * @sdev: per device object
3265 * @sas_device_priv_data : per device driver private data
3266  * Make sure the device is unblocked without error; if not, retry
3267  * by blocking and then unblocking.
3270 static void
3271 _scsih_internal_device_unblock(struct scsi_device *sdev,
3272 struct MPT3SAS_DEVICE *sas_device_priv_data)
3274 int r = 0;
3276 sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
3277 "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
3278 sas_device_priv_data->block = 0;
3279 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3280 if (r == -EINVAL) {
3281 /* The device has been set to SDEV_RUNNING by SD layer during
3282 * device addition but the request queue is still stopped by
3283 * our earlier block call. We need to perform a block again
3284 * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
3286 sdev_printk(KERN_WARNING, sdev,
3287 "device_unblock failed with return(%d) for handle(0x%04x) "
3288 "performing a block followed by an unblock\n",
3289 r, sas_device_priv_data->sas_target->handle);
3290 sas_device_priv_data->block = 1;
3291 r = scsi_internal_device_block_nowait(sdev);
3292 if (r)
3293 sdev_printk(KERN_WARNING, sdev, "retried device_block "
3294 "failed with return(%d) for handle(0x%04x)\n",
3295 r, sas_device_priv_data->sas_target->handle);
3297 sas_device_priv_data->block = 0;
3298 r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
3299 if (r)
3300 sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
3301 " failed with return(%d) for handle(0x%04x)\n",
3302 r, sas_device_priv_data->sas_target->handle);
3307 * _scsih_ublock_io_all_device - unblock every device
3308 * @ioc: per adapter object
3310 * change the device state from block to running
3312 static void
3313 _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3315 struct MPT3SAS_DEVICE *sas_device_priv_data;
3316 struct scsi_device *sdev;
3318 shost_for_each_device(sdev, ioc->shost) {
3319 sas_device_priv_data = sdev->hostdata;
3320 if (!sas_device_priv_data)
3321 continue;
3322 if (!sas_device_priv_data->block)
3323 continue;
3325 dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
3326 "device_running, handle(0x%04x)\n",
3327 sas_device_priv_data->sas_target->handle));
3328 _scsih_internal_device_unblock(sdev, sas_device_priv_data);
3334 * _scsih_ublock_io_device - prepare device to be deleted
3335 * @ioc: per adapter object
3336 * @sas_address: sas address
3338 * unblock then put device in offline state
3340 static void
3341 _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
3343 struct MPT3SAS_DEVICE *sas_device_priv_data;
3344 struct scsi_device *sdev;
3346 shost_for_each_device(sdev, ioc->shost) {
3347 sas_device_priv_data = sdev->hostdata;
3348 if (!sas_device_priv_data)
3349 continue;
3350 if (sas_device_priv_data->sas_target->sas_address
3351 != sas_address)
3352 continue;
3353 if (sas_device_priv_data->block)
3354 _scsih_internal_device_unblock(sdev,
3355 sas_device_priv_data);
3360 * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3361 * @ioc: per adapter object
3363 * During device pull we need to appropriately set the sdev state.
3365 static void
3366 _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
3368 struct MPT3SAS_DEVICE *sas_device_priv_data;
3369 struct scsi_device *sdev;
3371 shost_for_each_device(sdev, ioc->shost) {
3372 sas_device_priv_data = sdev->hostdata;
3373 if (!sas_device_priv_data)
3374 continue;
3375 if (sas_device_priv_data->block)
3376 continue;
3377 if (sas_device_priv_data->ignore_delay_remove) {
3378 sdev_printk(KERN_INFO, sdev,
3379 "%s skip device_block for SES handle(0x%04x)\n",
3380 __func__, sas_device_priv_data->sas_target->handle);
3381 continue;
3383 _scsih_internal_device_block(sdev, sas_device_priv_data);
3388 * _scsih_block_io_device - set the device state to SDEV_BLOCK
3389 * @ioc: per adapter object
3390 * @handle: device handle
3392 * During device pull we need to appropriately set the sdev state.
3394 static void
3395 _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3397 struct MPT3SAS_DEVICE *sas_device_priv_data;
3398 struct scsi_device *sdev;
3399 struct _sas_device *sas_device;
3401 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
3403 shost_for_each_device(sdev, ioc->shost) {
3404 sas_device_priv_data = sdev->hostdata;
3405 if (!sas_device_priv_data)
3406 continue;
3407 if (sas_device_priv_data->sas_target->handle != handle)
3408 continue;
3409 if (sas_device_priv_data->block)
3410 continue;
3411 if (sas_device && sas_device->pend_sas_rphy_add)
3412 continue;
3413 if (sas_device_priv_data->ignore_delay_remove) {
3414 sdev_printk(KERN_INFO, sdev,
3415 "%s skip device_block for SES handle(0x%04x)\n",
3416 __func__, sas_device_priv_data->sas_target->handle);
3417 continue;
3419 _scsih_internal_device_block(sdev, sas_device_priv_data);
3422 if (sas_device)
3423 sas_device_put(sas_device);
3427 * _scsih_block_io_to_children_attached_to_ex
3428 * @ioc: per adapter object
3429  * @sas_expander: the expander's sas_node object
3431  * This routine sets the sdev state to SDEV_BLOCK for all devices
3432  * attached to this expander. This function is called when the expander
3433  * is pulled.
3435 static void
3436 _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
3437 struct _sas_node *sas_expander)
3439 struct _sas_port *mpt3sas_port;
3440 struct _sas_device *sas_device;
3441 struct _sas_node *expander_sibling;
3442 unsigned long flags;
3444 if (!sas_expander)
3445 return;
3447 list_for_each_entry(mpt3sas_port,
3448 &sas_expander->sas_port_list, port_list) {
3449 if (mpt3sas_port->remote_identify.device_type ==
3450 SAS_END_DEVICE) {
3451 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3452 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
3453 mpt3sas_port->remote_identify.sas_address);
3454 if (sas_device) {
3455 set_bit(sas_device->handle,
3456 ioc->blocking_handles);
3457 sas_device_put(sas_device);
3459 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3463 list_for_each_entry(mpt3sas_port,
3464 &sas_expander->sas_port_list, port_list) {
3466 if (mpt3sas_port->remote_identify.device_type ==
3467 SAS_EDGE_EXPANDER_DEVICE ||
3468 mpt3sas_port->remote_identify.device_type ==
3469 SAS_FANOUT_EXPANDER_DEVICE) {
3470 expander_sibling =
3471 mpt3sas_scsih_expander_find_by_sas_address(
3472 ioc, mpt3sas_port->remote_identify.sas_address);
3473 _scsih_block_io_to_children_attached_to_ex(ioc,
3474 expander_sibling);
3480 * _scsih_block_io_to_children_attached_directly
3481 * @ioc: per adapter object
3482 * @event_data: topology change event data
3484  * This routine sets the sdev state to SDEV_BLOCK for all directly
3485  * attached devices during a device pull.
3487 static void
3488 _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3489 Mpi2EventDataSasTopologyChangeList_t *event_data)
3491 int i;
3492 u16 handle;
3493 u16 reason_code;
3495 for (i = 0; i < event_data->NumEntries; i++) {
3496 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
3497 if (!handle)
3498 continue;
3499 reason_code = event_data->PHY[i].PhyStatus &
3500 MPI2_EVENT_SAS_TOPO_RC_MASK;
3501 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
3502 _scsih_block_io_device(ioc, handle);
3507 * _scsih_block_io_to_pcie_children_attached_directly
3508 * @ioc: per adapter object
3509 * @event_data: topology change event data
3511  * This routine sets the sdev state to SDEV_BLOCK for all directly
3512  * attached devices during a device pull/reconnect.
3514 static void
3515 _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
3516 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
3518 int i;
3519 u16 handle;
3520 u16 reason_code;
3522 for (i = 0; i < event_data->NumEntries; i++) {
3523 handle =
3524 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
3525 if (!handle)
3526 continue;
3527 reason_code = event_data->PortEntry[i].PortStatus;
3528 if (reason_code ==
3529 MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
3530 _scsih_block_io_device(ioc, handle);
3534 * _scsih_tm_tr_send - send task management request
3535 * @ioc: per adapter object
3536 * @handle: device handle
3537 * Context: interrupt time.
3539  * This code initiates the device removal handshake protocol
3540  * with the controller firmware. This function issues a target reset
3541  * using the high priority request queue; a sas iounit
3542  * control request (MPI2_SAS_OP_REMOVE_DEVICE) is then sent from its completion.
3544  * This is designed to send multiple task management requests at the same
3545  * time to the fifo. If the fifo is full, we append the request
3546  * and process it in a future completion.
3548 static void
3549 _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3551 Mpi2SCSITaskManagementRequest_t *mpi_request;
3552 u16 smid;
3553 struct _sas_device *sas_device = NULL;
3554 struct _pcie_device *pcie_device = NULL;
3555 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
3556 u64 sas_address = 0;
3557 unsigned long flags;
3558 struct _tr_list *delayed_tr;
3559 u32 ioc_state;
3560 u8 tr_method = 0;
3562 if (ioc->pci_error_recovery) {
3563 dewtprintk(ioc,
3564 ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
3565 __func__, handle));
3566 return;
3568 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3569 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3570 dewtprintk(ioc,
3571 ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
3572 __func__, handle));
3573 return;
3576 /* if PD, then return */
3577 if (test_bit(handle, ioc->pd_handles))
3578 return;
3580 clear_bit(handle, ioc->pend_os_device_add);
3582 spin_lock_irqsave(&ioc->sas_device_lock, flags);
3583 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
3584 if (sas_device && sas_device->starget &&
3585 sas_device->starget->hostdata) {
3586 sas_target_priv_data = sas_device->starget->hostdata;
3587 sas_target_priv_data->deleted = 1;
3588 sas_address = sas_device->sas_address;
3590 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
3591 if (!sas_device) {
3592 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
3593 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
3594 if (pcie_device && pcie_device->starget &&
3595 pcie_device->starget->hostdata) {
3596 sas_target_priv_data = pcie_device->starget->hostdata;
3597 sas_target_priv_data->deleted = 1;
3598 sas_address = pcie_device->wwid;
3600 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
3601 if (pcie_device && (!ioc->tm_custom_handling))
3602 tr_method =
3603 MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
3604 else
3605 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
3607 if (sas_target_priv_data) {
3608 dewtprintk(ioc,
3609 ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
3610 handle, (u64)sas_address));
3611 if (sas_device) {
3612 if (sas_device->enclosure_handle != 0)
3613 dewtprintk(ioc,
3614 ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
3615 (u64)sas_device->enclosure_logical_id,
3616 sas_device->slot));
3617 if (sas_device->connector_name[0] != '\0')
3618 dewtprintk(ioc,
3619 ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
3620 sas_device->enclosure_level,
3621 sas_device->connector_name));
3622 } else if (pcie_device) {
3623 if (pcie_device->enclosure_handle != 0)
3624 dewtprintk(ioc,
3625 ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
3626 (u64)pcie_device->enclosure_logical_id,
3627 pcie_device->slot));
3628 if (pcie_device->connector_name[0] != '\0')
3629 dewtprintk(ioc,
3630 ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
3631 pcie_device->enclosure_level,
3632 pcie_device->connector_name));
3634 _scsih_ublock_io_device(ioc, sas_address);
3635 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
3638 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
3639 if (!smid) {
3640 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3641 if (!delayed_tr)
3642 goto out;
3643 INIT_LIST_HEAD(&delayed_tr->list);
3644 delayed_tr->handle = handle;
3645 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
3646 dewtprintk(ioc,
3647 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3648 handle));
3649 goto out;
3652 dewtprintk(ioc,
3653 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3654 handle, smid, ioc->tm_tr_cb_idx));
3655 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3656 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3657 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3658 mpi_request->DevHandle = cpu_to_le16(handle);
3659 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3660 mpi_request->MsgFlags = tr_method;
3661 set_bit(handle, ioc->device_remove_in_progress);
3662 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
3663 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
3665 out:
3666 if (sas_device)
3667 sas_device_put(sas_device);
3668 if (pcie_device)
3669 pcie_device_put(pcie_device);
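/*
 * Editorial sketch of the device removal handshake implemented by the
 * routines in this area (as coded in this file):
 *
 *   _scsih_tm_tr_send(handle)
 *       -> high-priority TARGET_RESET request (cb = tm_tr_cb_idx)
 *   _scsih_tm_tr_complete()
 *       -> SAS IO unit control MPI2_SAS_OP_REMOVE_DEVICE
 *          (cb = tm_sas_control_cb_idx)
 *   _scsih_sas_control_complete()
 *       -> clears device_remove_in_progress for the handle
 *
 * When no smid is available at a given step, the request is parked on
 * delayed_tr_list / delayed_sc_list and replayed from a later completion.
 */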
3673  * _scsih_tm_tr_complete - target reset completion routine
3674 * @ioc: per adapter object
3675 * @smid: system request message index
3676 * @msix_index: MSIX table index supplied by the OS
3677 * @reply: reply message frame(lower 32bit addr)
3678 * Context: interrupt time.
3680 * This is the target reset completion routine.
3681 * This code is part of the code to initiate the device removal
3682 * handshake protocol with controller firmware.
3683 * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
3685 * Return: 1 meaning mf should be freed from _base_interrupt
3686 * 0 means the mf is freed from this function.
3688 static u8
3689 _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
3690 u32 reply)
3692 u16 handle;
3693 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3694 Mpi2SCSITaskManagementReply_t *mpi_reply =
3695 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3696 Mpi2SasIoUnitControlRequest_t *mpi_request;
3697 u16 smid_sas_ctrl;
3698 u32 ioc_state;
3699 struct _sc_list *delayed_sc;
3701 if (ioc->pci_error_recovery) {
3702 dewtprintk(ioc,
3703 ioc_info(ioc, "%s: host in pci error recovery\n",
3704 __func__));
3705 return 1;
3707 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
3708 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
3709 dewtprintk(ioc,
3710 ioc_info(ioc, "%s: host is not operational\n",
3711 __func__));
3712 return 1;
3714 if (unlikely(!mpi_reply)) {
3715 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3716 __FILE__, __LINE__, __func__);
3717 return 1;
3719 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3720 handle = le16_to_cpu(mpi_request_tm->DevHandle);
3721 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3722 dewtprintk(ioc,
3723 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3724 handle,
3725 le16_to_cpu(mpi_reply->DevHandle), smid));
3726 return 0;
3729 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
3730 dewtprintk(ioc,
3731 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3732 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3733 le32_to_cpu(mpi_reply->IOCLogInfo),
3734 le32_to_cpu(mpi_reply->TerminationCount)));
3736 smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
3737 if (!smid_sas_ctrl) {
3738 delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
3739 if (!delayed_sc)
3740 return _scsih_check_for_pending_tm(ioc, smid);
3741 INIT_LIST_HEAD(&delayed_sc->list);
3742 delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
3743 list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
3744 dewtprintk(ioc,
3745 ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
3746 handle));
3747 return _scsih_check_for_pending_tm(ioc, smid);
3750 dewtprintk(ioc,
3751 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3752 handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
3753 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
3754 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
3755 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
3756 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
3757 mpi_request->DevHandle = mpi_request_tm->DevHandle;
3758 mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
3760 return _scsih_check_for_pending_tm(ioc, smid);
3763 /** _scsih_allow_scmd_to_device - check whether the scmd should be
3764  * issued to the IOC or not.
3765 * @ioc: per adapter object
3766 * @scmd: pointer to scsi command object
3768 * Returns true if scmd can be issued to IOC otherwise returns false.
3770 inline bool _scsih_allow_scmd_to_device(struct MPT3SAS_ADAPTER *ioc,
3771 struct scsi_cmnd *scmd)
3774 if (ioc->pci_error_recovery)
3775 return false;
3777 if (ioc->hba_mpi_version_belonged == MPI2_VERSION) {
3778 if (ioc->remove_host)
3779 return false;
3781 return true;
3784 if (ioc->remove_host) {
3786 switch (scmd->cmnd[0]) {
3787 case SYNCHRONIZE_CACHE:
3788 case START_STOP:
3789 return true;
3790 default:
3791 return false;
3795 return true;
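/*
 * Usage sketch: callers fail a rejected command straight back to the
 * midlayer, e.g.
 *
 *   if (!_scsih_allow_scmd_to_device(ioc, scmd)) {
 *           scmd->result = DID_NO_CONNECT << 16;
 *           scmd->scsi_done(scmd);
 *           return 0;
 *   }
 *
 * which is how scsih_qcmd() consumes it below; during host removal on
 * MPI2.5/2.6 controllers only SYNCHRONIZE CACHE and START STOP get through.
 */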
3799 * _scsih_sas_control_complete - completion routine
3800 * @ioc: per adapter object
3801 * @smid: system request message index
3802 * @msix_index: MSIX table index supplied by the OS
3803 * @reply: reply message frame(lower 32bit addr)
3804 * Context: interrupt time.
3806 * This is the sas iounit control completion routine.
3807 * This code is part of the code to initiate the device removal
3808 * handshake protocol with controller firmware.
3810 * Return: 1 meaning mf should be freed from _base_interrupt
3811 * 0 means the mf is freed from this function.
3813 static u8
3814 _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3815 u8 msix_index, u32 reply)
3817 Mpi2SasIoUnitControlReply_t *mpi_reply =
3818 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3820 if (likely(mpi_reply)) {
3821 dewtprintk(ioc,
3822 ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
3823 le16_to_cpu(mpi_reply->DevHandle), smid,
3824 le16_to_cpu(mpi_reply->IOCStatus),
3825 le32_to_cpu(mpi_reply->IOCLogInfo)));
3826 if (le16_to_cpu(mpi_reply->IOCStatus) ==
3827 MPI2_IOCSTATUS_SUCCESS) {
3828 clear_bit(le16_to_cpu(mpi_reply->DevHandle),
3829 ioc->device_remove_in_progress);
3831 } else {
3832 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3833 __FILE__, __LINE__, __func__);
3835 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
3839 * _scsih_tm_tr_volume_send - send target reset request for volumes
3840 * @ioc: per adapter object
3841 * @handle: device handle
3842 * Context: interrupt time.
3844 * This is designed to send multiple task management requests to the fifo
3845 * at the same time. If the fifo is full, we will append the request
3846 * and process it in a future completion.
3848 static void
3849 _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
3851 Mpi2SCSITaskManagementRequest_t *mpi_request;
3852 u16 smid;
3853 struct _tr_list *delayed_tr;
3855 if (ioc->pci_error_recovery) {
3856 dewtprintk(ioc,
3857 ioc_info(ioc, "%s: host reset in progress!\n",
3858 __func__));
3859 return;
3862 smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
3863 if (!smid) {
3864 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
3865 if (!delayed_tr)
3866 return;
3867 INIT_LIST_HEAD(&delayed_tr->list);
3868 delayed_tr->handle = handle;
3869 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
3870 dewtprintk(ioc,
3871 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
3872 handle));
3873 return;
3876 dewtprintk(ioc,
3877 ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
3878 handle, smid, ioc->tm_tr_volume_cb_idx));
3879 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
3880 memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
3881 mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
3882 mpi_request->DevHandle = cpu_to_le16(handle);
3883 mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
3884 mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
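/*
 * When no hi-priority smid is available the volume handle is queued on
 * ioc->delayed_tr_volume_list; _scsih_check_for_pending_tm() below frees
 * the completing smid and re-issues the reset from that list, so delayed
 * volume resets drain one per completion.
 */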
3888 * _scsih_tm_volume_tr_complete - target reset completion
3889 * @ioc: per adapter object
3890 * @smid: system request message index
3891 * @msix_index: MSIX table index supplied by the OS
3892 * @reply: reply message frame(lower 32bit addr)
3893 * Context: interrupt time.
3895 * Return: 1 meaning mf should be freed from _base_interrupt
3896 * 0 means the mf is freed from this function.
3898 static u8
3899 _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3900 u8 msix_index, u32 reply)
3902 u16 handle;
3903 Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
3904 Mpi2SCSITaskManagementReply_t *mpi_reply =
3905 mpt3sas_base_get_reply_virt_addr(ioc, reply);
3907 if (ioc->shost_recovery || ioc->pci_error_recovery) {
3908 dewtprintk(ioc,
3909 ioc_info(ioc, "%s: host reset in progress!\n",
3910 __func__));
3911 return 1;
3913 if (unlikely(!mpi_reply)) {
3914 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
3915 __FILE__, __LINE__, __func__);
3916 return 1;
3919 mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
3920 handle = le16_to_cpu(mpi_request_tm->DevHandle);
3921 if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
3922 dewtprintk(ioc,
3923 ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
3924 handle, le16_to_cpu(mpi_reply->DevHandle),
3925 smid));
3926 return 0;
3929 dewtprintk(ioc,
3930 ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
3931 handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
3932 le32_to_cpu(mpi_reply->IOCLogInfo),
3933 le32_to_cpu(mpi_reply->TerminationCount)));
3935 return _scsih_check_for_pending_tm(ioc, smid);
3939 * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
3940 * @ioc: per adapter object
3941 * @smid: system request message index
3942 * @event: Event ID
3943 * @event_context: used to track events uniquely
3945 * Context - processed in interrupt context.
3947 static void
3948 _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
3949 U32 event_context)
3951 Mpi2EventAckRequest_t *ack_request;
3952 int i = smid - ioc->internal_smid;
3953 unsigned long flags;
3955 /* Without releasing the smid just update the
3956 * callback index and reuse the same smid for
3957 * processing this delayed request
3959 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3960 ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
3961 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3963 dewtprintk(ioc,
3964 ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
3965 le16_to_cpu(event), smid, ioc->base_cb_idx));
3966 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
3967 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
3968 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
3969 ack_request->Event = event;
3970 ack_request->EventContext = event_context;
3971 ack_request->VF_ID = 0; /* TODO */
3972 ack_request->VP_ID = 0;
3973 mpt3sas_base_put_smid_default(ioc, smid);
3977 * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
3978 * sas_io_unit_ctrl messages
3979 * @ioc: per adapter object
3980 * @smid: system request message index
3981 * @handle: device handle
3983 * Context - processed in interrupt context.
3985 static void
3986 _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
3987 u16 smid, u16 handle)
3989 Mpi2SasIoUnitControlRequest_t *mpi_request;
3990 u32 ioc_state;
3991 int i = smid - ioc->internal_smid;
3992 unsigned long flags;
3994 if (ioc->remove_host) {
3995 dewtprintk(ioc,
3996 ioc_info(ioc, "%s: host has been removed\n",
3997 __func__));
3998 return;
3999 } else if (ioc->pci_error_recovery) {
4000 dewtprintk(ioc,
4001 ioc_info(ioc, "%s: host in pci error recovery\n",
4002 __func__));
4003 return;
4005 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
4006 if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
4007 dewtprintk(ioc,
4008 ioc_info(ioc, "%s: host is not operational\n",
4009 __func__));
4010 return;
4013 /* Without releasing the smid just update the
4014 * call back index and reuse the same smid for
4015 * processing this delayed request
4017 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4018 ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
4019 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4021 dewtprintk(ioc,
4022 ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
4023 handle, smid, ioc->tm_sas_control_cb_idx));
4024 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4025 memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
4026 mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
4027 mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
4028 mpi_request->DevHandle = cpu_to_le16(handle);
4029 mpt3sas_base_put_smid_default(ioc, smid);
4033 * _scsih_check_for_pending_internal_cmds - check for pending internal messages
4034 * @ioc: per adapter object
4035 * @smid: system request message index
4037 * Context: Executed in interrupt context
4039 * This will check delayed internal messages list, and process the
4040 * next request.
4042 * Return: 1 meaning mf should be freed from _base_interrupt
4043 * 0 means the mf is freed from this function.
4046 mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4048 struct _sc_list *delayed_sc;
4049 struct _event_ack_list *delayed_event_ack;
4051 if (!list_empty(&ioc->delayed_event_ack_list)) {
4052 delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
4053 struct _event_ack_list, list);
4054 _scsih_issue_delayed_event_ack(ioc, smid,
4055 delayed_event_ack->Event, delayed_event_ack->EventContext);
4056 list_del(&delayed_event_ack->list);
4057 kfree(delayed_event_ack);
4058 return 0;
4061 if (!list_empty(&ioc->delayed_sc_list)) {
4062 delayed_sc = list_entry(ioc->delayed_sc_list.next,
4063 struct _sc_list, list);
4064 _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
4065 delayed_sc->handle);
4066 list_del(&delayed_sc->list);
4067 kfree(delayed_sc);
4068 return 0;
4070 return 1;
4074 * _scsih_check_for_pending_tm - check for pending task management
4075 * @ioc: per adapter object
4076 * @smid: system request message index
4078 * This will check the delayed target reset list, and feed the
4079 * next request.
4081 * Return: 1 meaning mf should be freed from _base_interrupt
4082 * 0 means the mf is freed from this function.
4084 static u8
4085 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4087 struct _tr_list *delayed_tr;
4089 if (!list_empty(&ioc->delayed_tr_volume_list)) {
4090 delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
4091 struct _tr_list, list);
4092 mpt3sas_base_free_smid(ioc, smid);
4093 _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
4094 list_del(&delayed_tr->list);
4095 kfree(delayed_tr);
4096 return 0;
4099 if (!list_empty(&ioc->delayed_tr_list)) {
4100 delayed_tr = list_entry(ioc->delayed_tr_list.next,
4101 struct _tr_list, list);
4102 mpt3sas_base_free_smid(ioc, smid);
4103 _scsih_tm_tr_send(ioc, delayed_tr->handle);
4104 list_del(&delayed_tr->list);
4105 kfree(delayed_tr);
4106 return 0;
4109 return 1;
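/*
 * Note the ordering above: delayed volume resets are serviced before
 * delayed device resets. _scsih_check_ir_config_unhide_events() relies on
 * this by parking PD target resets on ioc->delayed_tr_list so they start
 * only after the owning volume's reset has been issued.
 */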
4113 * _scsih_check_topo_delete_events - sanity check on topo events
4114 * @ioc: per adapter object
4115 * @event_data: the event data payload
4117 * This routine was added to better handle cable pulls (cable breaker).
4119 * This handles the case where the driver receives multiple expander
4120 * add and delete events in a single shot. When there is a delete event,
4121 * the routine will void any pending add events waiting in the event queue.
4123 static void
4124 _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
4125 Mpi2EventDataSasTopologyChangeList_t *event_data)
4127 struct fw_event_work *fw_event;
4128 Mpi2EventDataSasTopologyChangeList_t *local_event_data;
4129 u16 expander_handle;
4130 struct _sas_node *sas_expander;
4131 unsigned long flags;
4132 int i, reason_code;
4133 u16 handle;
4135 for (i = 0 ; i < event_data->NumEntries; i++) {
4136 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
4137 if (!handle)
4138 continue;
4139 reason_code = event_data->PHY[i].PhyStatus &
4140 MPI2_EVENT_SAS_TOPO_RC_MASK;
4141 if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
4142 _scsih_tm_tr_send(ioc, handle);
4145 expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
4146 if (expander_handle < ioc->sas_hba.num_phys) {
4147 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4148 return;
4150 if (event_data->ExpStatus ==
4151 MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
4152 /* put expander attached devices into blocking state */
4153 spin_lock_irqsave(&ioc->sas_node_lock, flags);
4154 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
4155 expander_handle);
4156 _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
4157 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
4158 do {
4159 handle = find_first_bit(ioc->blocking_handles,
4160 ioc->facts.MaxDevHandle);
4161 if (handle < ioc->facts.MaxDevHandle)
4162 _scsih_block_io_device(ioc, handle);
4163 } while (test_and_clear_bit(handle, ioc->blocking_handles));
4164 } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
4165 _scsih_block_io_to_children_attached_directly(ioc, event_data);
4167 if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4168 return;
4170 /* mark ignore flag for pending events */
4171 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4172 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4173 if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
4174 fw_event->ignore)
4175 continue;
4176 local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
4177 fw_event->event_data;
4178 if (local_event_data->ExpStatus ==
4179 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4180 local_event_data->ExpStatus ==
4181 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4182 if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
4183 expander_handle) {
4184 dewtprintk(ioc,
4185 ioc_info(ioc, "setting ignoring flag\n"));
4186 fw_event->ignore = 1;
4190 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
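/*
 * The ignore flag set here is checked again when the queued topology
 * change event is eventually processed by the firmware event worker, so a
 * pending expander-add is silently dropped once a matching delete has been
 * seen.
 */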
4194 * _scsih_check_pcie_topo_remove_events - sanity check on topo
4195 * events
4196 * @ioc: per adapter object
4197 * @event_data: the event data payload
4199 * This handles the case where the driver receives multiple switch
4200 * or device add and delete events in a single shot. When there
4201 * is a delete event the routine will void any pending add
4202 * events waiting in the event queue.
4204 static void
4205 _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
4206 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
4208 struct fw_event_work *fw_event;
4209 Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
4210 unsigned long flags;
4211 int i, reason_code;
4212 u16 handle, switch_handle;
4214 for (i = 0; i < event_data->NumEntries; i++) {
4215 handle =
4216 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
4217 if (!handle)
4218 continue;
4219 reason_code = event_data->PortEntry[i].PortStatus;
4220 if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
4221 _scsih_tm_tr_send(ioc, handle);
4224 switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
4225 if (!switch_handle) {
4226 _scsih_block_io_to_pcie_children_attached_directly(
4227 ioc, event_data);
4228 return;
4230 /* TODO We are not supporting cascaded PCIe Switch removal yet*/
4231 if ((event_data->SwitchStatus
4232 == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
4233 (event_data->SwitchStatus ==
4234 MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
4235 _scsih_block_io_to_pcie_children_attached_directly(
4236 ioc, event_data);
4238 if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
4239 return;
4241 /* mark ignore flag for pending events */
4242 spin_lock_irqsave(&ioc->fw_event_lock, flags);
4243 list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
4244 if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
4245 fw_event->ignore)
4246 continue;
4247 local_event_data =
4248 (Mpi26EventDataPCIeTopologyChangeList_t *)
4249 fw_event->event_data;
4250 if (local_event_data->SwitchStatus ==
4251 MPI2_EVENT_SAS_TOPO_ES_ADDED ||
4252 local_event_data->SwitchStatus ==
4253 MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
4254 if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
4255 switch_handle) {
4256 dewtprintk(ioc,
4257 ioc_info(ioc, "setting ignoring flag for switch event\n"));
4258 fw_event->ignore = 1;
4262 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
4266 * _scsih_set_volume_delete_flag - setting volume delete flag
4267 * @ioc: per adapter object
4268 * @handle: device handle
4270 * This returns nothing.
4272 static void
4273 _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4275 struct _raid_device *raid_device;
4276 struct MPT3SAS_TARGET *sas_target_priv_data;
4277 unsigned long flags;
4279 spin_lock_irqsave(&ioc->raid_device_lock, flags);
4280 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
4281 if (raid_device && raid_device->starget &&
4282 raid_device->starget->hostdata) {
4283 sas_target_priv_data =
4284 raid_device->starget->hostdata;
4285 sas_target_priv_data->deleted = 1;
4286 dewtprintk(ioc,
4287 ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
4288 handle, (u64)raid_device->wwid));
4290 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
4294 * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
4295 * @handle: input handle
4296 * @a: handle for volume a
4297 * @b: handle for volume b
4299 * IR firmware only supports two raid volumes. The purpose of this
4300 * routine is to store the volume handle in either a or b: a non-zero
4301 * handle not already stored is placed in a if a is unset, else in b.
4303 static void
4304 _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
4306 if (!handle || handle == *a || handle == *b)
4307 return;
4308 if (!*a)
4309 *a = handle;
4310 else if (!*b)
4311 *b = handle;
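/*
 * Worked example (hypothetical handles): feeding 0x0000, 0x000a, 0x000a,
 * 0x000b and 0x000c through this routine leaves a = 0x000a and b = 0x000b;
 * the zero handle, the duplicate and the third distinct volume are all
 * ignored, matching the two-volume limit of IR firmware.
 */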
4315 * _scsih_check_ir_config_unhide_events - check for UNHIDE events
4316 * @ioc: per adapter object
4317 * @event_data: the event data payload
4318 * Context: interrupt time.
4320 * This routine will send target reset to volume, followed by target
4321 * resets to the PDs. This is called when a PD has been removed, or
4322 * volume has been deleted or removed. When the target reset is sent
4323 * to volume, the PD target resets need to be queued to start upon
4324 * completion of the volume target reset.
4326 static void
4327 _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
4328 Mpi2EventDataIrConfigChangeList_t *event_data)
4330 Mpi2EventIrConfigElement_t *element;
4331 int i;
4332 u16 handle, volume_handle, a, b;
4333 struct _tr_list *delayed_tr;
4335 a = 0;
4336 b = 0;
4338 if (ioc->is_warpdrive)
4339 return;
4341 /* Volume Resets for Deleted or Removed */
4342 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4343 for (i = 0; i < event_data->NumElements; i++, element++) {
4344 if (le32_to_cpu(event_data->Flags) &
4345 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4346 continue;
4347 if (element->ReasonCode ==
4348 MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
4349 element->ReasonCode ==
4350 MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
4351 volume_handle = le16_to_cpu(element->VolDevHandle);
4352 _scsih_set_volume_delete_flag(ioc, volume_handle);
4353 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4357 /* Volume Resets for UNHIDE events */
4358 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4359 for (i = 0; i < event_data->NumElements; i++, element++) {
4360 if (le32_to_cpu(event_data->Flags) &
4361 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
4362 continue;
4363 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
4364 volume_handle = le16_to_cpu(element->VolDevHandle);
4365 _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
4369 if (a)
4370 _scsih_tm_tr_volume_send(ioc, a);
4371 if (b)
4372 _scsih_tm_tr_volume_send(ioc, b);
4374 /* PD target resets */
4375 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
4376 for (i = 0; i < event_data->NumElements; i++, element++) {
4377 if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
4378 continue;
4379 handle = le16_to_cpu(element->PhysDiskDevHandle);
4380 volume_handle = le16_to_cpu(element->VolDevHandle);
4381 clear_bit(handle, ioc->pd_handles);
4382 if (!volume_handle)
4383 _scsih_tm_tr_send(ioc, handle);
4384 else if (volume_handle == a || volume_handle == b) {
4385 delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
4386 BUG_ON(!delayed_tr);
4387 INIT_LIST_HEAD(&delayed_tr->list);
4388 delayed_tr->handle = handle;
4389 list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
4390 dewtprintk(ioc,
4391 ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
4392 handle));
4393 } else
4394 _scsih_tm_tr_send(ioc, handle);
4400 * _scsih_check_volume_delete_events - set delete flag for volumes
4401 * @ioc: per adapter object
4402 * @event_data: the event data payload
4403 * Context: interrupt time.
4405 * This will handle the case when the cable connected to the entire volume
4406 * is pulled. We take care of setting the deleted flag so normal IO will
4407 * not be sent.
4409 static void
4410 _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
4411 Mpi2EventDataIrVolume_t *event_data)
4413 u32 state;
4415 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
4416 return;
4417 state = le32_to_cpu(event_data->NewValue);
4418 if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
4419 MPI2_RAID_VOL_STATE_FAILED)
4420 _scsih_set_volume_delete_flag(ioc,
4421 le16_to_cpu(event_data->VolDevHandle));
4425 * _scsih_temp_threshold_events - display temperature threshold exceeded events
4426 * @ioc: per adapter object
4427 * @event_data: the temp threshold event data
4428 * Context: interrupt time.
4430 static void
4431 _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
4432 Mpi2EventDataTemperature_t *event_data)
4434 if (ioc->temp_sensors_count >= event_data->SensorNum) {
4435 ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
4436 le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
4437 le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
4438 le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
4439 le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
4440 event_data->SensorNum);
4441 ioc_err(ioc, "Current Temp In Celsius: %d\n",
4442 event_data->CurrentTemperature);
4446 static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
4448 struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
4450 if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
4451 return 0;
4453 if (pending)
4454 return test_and_set_bit(0, &priv->ata_command_pending);
4456 clear_bit(0, &priv->ata_command_pending);
4457 return 0;
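/*
 * This serialises ATA_12/ATA_16 passthrough per device: test_and_set_bit()
 * returns non-zero while another SATL command is still outstanding, so
 * scsih_qcmd() loops and completes the new command with SAM_STAT_BUSY until
 * the bit clears (it is cleared again in _scsih_io_done() and
 * _scsih_flush_running_cmds()).
 */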
4461 * _scsih_flush_running_cmds - completing outstanding commands.
4462 * @ioc: per adapter object
4464 * Flushes out all pending scmd commands following a host reset; each is
4465 * completed back to the midlayer with DID_RESET or DID_NO_CONNECT.
4467 static void
4468 _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
4470 struct scsi_cmnd *scmd;
4471 struct scsiio_tracker *st;
4472 u16 smid;
4473 int count = 0;
4475 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
4476 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
4477 if (!scmd)
4478 continue;
4479 count++;
4480 _scsih_set_satl_pending(scmd, false);
4481 st = scsi_cmd_priv(scmd);
4482 mpt3sas_base_clear_st(ioc, st);
4483 scsi_dma_unmap(scmd);
4484 if (ioc->pci_error_recovery || ioc->remove_host)
4485 scmd->result = DID_NO_CONNECT << 16;
4486 else
4487 scmd->result = DID_RESET << 16;
4488 scmd->scsi_done(scmd);
4490 dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
4494 * _scsih_setup_eedp - setup MPI request for EEDP transfer
4495 * @ioc: per adapter object
4496 * @scmd: pointer to scsi command object
4497 * @mpi_request: pointer to the SCSI_IO request message frame
4499 * Supporting protection types 1, 2 and 3.
4501 static void
4502 _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4503 Mpi25SCSIIORequest_t *mpi_request)
4505 u16 eedp_flags;
4506 unsigned char prot_op = scsi_get_prot_op(scmd);
4507 unsigned char prot_type = scsi_get_prot_type(scmd);
4508 Mpi25SCSIIORequest_t *mpi_request_3v =
4509 (Mpi25SCSIIORequest_t *)mpi_request;
4511 if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
4512 return;
4514 if (prot_op == SCSI_PROT_READ_STRIP)
4515 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
4516 else if (prot_op == SCSI_PROT_WRITE_INSERT)
4517 eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
4518 else
4519 return;
4521 switch (prot_type) {
4522 case SCSI_PROT_DIF_TYPE1:
4523 case SCSI_PROT_DIF_TYPE2:
4526 * enable ref/guard checking
4527 * auto increment ref tag
4529 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
4530 MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
4531 MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4532 mpi_request->CDB.EEDP32.PrimaryReferenceTag =
4533 cpu_to_be32(t10_pi_ref_tag(scmd->request));
4534 break;
4536 case SCSI_PROT_DIF_TYPE3:
4539 * enable guard checking
4541 eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
4543 break;
4546 mpi_request_3v->EEDPBlockSize =
4547 cpu_to_le16(scmd->device->sector_size);
4549 if (ioc->is_gen35_ioc)
4550 eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
4551 mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
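/*
 * Summary of the flag selection above: READ_STRIP maps to the EEDP
 * check/remove operation and WRITE_INSERT to insert; DIF types 1 and 2 add
 * guard plus reference tag checking with an incrementing reference tag
 * seeded from t10_pi_ref_tag(), while type 3 checks the guard only.
 */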
4555 * _scsih_eedp_error_handling - return sense code for EEDP errors
4556 * @scmd: pointer to scsi command object
4557 * @ioc_status: ioc status
4559 static void
4560 _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
4562 u8 ascq;
4564 switch (ioc_status) {
4565 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4566 ascq = 0x01;
4567 break;
4568 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4569 ascq = 0x02;
4570 break;
4571 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4572 ascq = 0x03;
4573 break;
4574 default:
4575 ascq = 0x00;
4576 break;
4578 scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
4579 ascq);
4580 scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
4581 SAM_STAT_CHECK_CONDITION;
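/*
 * The ascq values chosen above follow the T10 additional sense codes for
 * asc 0x10: 0x01 guard check failed, 0x02 application tag check failed,
 * 0x03 reference tag check failed; anything else reports ascq 0x00.
 */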
4585 * scsih_qcmd - main scsi request entry point
4586 * @shost: SCSI host pointer
4587 * @scmd: pointer to scsi command object
4589 * The callback index is set inside `ioc->scsi_io_cb_idx`.
4591 * Return: 0 on success. If there's a failure, return either:
4592 * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
4593 * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
4595 static int
4596 scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
4598 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
4599 struct MPT3SAS_DEVICE *sas_device_priv_data;
4600 struct MPT3SAS_TARGET *sas_target_priv_data;
4601 struct _raid_device *raid_device;
4602 struct request *rq = scmd->request;
4603 int class;
4604 Mpi25SCSIIORequest_t *mpi_request;
4605 struct _pcie_device *pcie_device = NULL;
4606 u32 mpi_control;
4607 u16 smid;
4608 u16 handle;
4610 if (ioc->logging_level & MPT_DEBUG_SCSI)
4611 scsi_print_command(scmd);
4613 sas_device_priv_data = scmd->device->hostdata;
4614 if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
4615 scmd->result = DID_NO_CONNECT << 16;
4616 scmd->scsi_done(scmd);
4617 return 0;
4620 if (!(_scsih_allow_scmd_to_device(ioc, scmd))) {
4621 scmd->result = DID_NO_CONNECT << 16;
4622 scmd->scsi_done(scmd);
4623 return 0;
4626 sas_target_priv_data = sas_device_priv_data->sas_target;
4628 /* invalid device handle */
4629 handle = sas_target_priv_data->handle;
4630 if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
4631 scmd->result = DID_NO_CONNECT << 16;
4632 scmd->scsi_done(scmd);
4633 return 0;
4637 if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
4638 /* host recovery or link resets sent via IOCTLs */
4639 return SCSI_MLQUEUE_HOST_BUSY;
4640 } else if (sas_target_priv_data->deleted) {
4641 /* device has been deleted */
4642 scmd->result = DID_NO_CONNECT << 16;
4643 scmd->scsi_done(scmd);
4644 return 0;
4645 } else if (sas_target_priv_data->tm_busy ||
4646 sas_device_priv_data->block) {
4647 /* device busy with task management */
4648 return SCSI_MLQUEUE_DEVICE_BUSY;
4652 * Bug workaround for firmware SATL handling. The loop
4653 * is based on atomic operations and ensures consistency
4654 * since we're lockless at this point
4656 do {
4657 if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
4658 scmd->result = SAM_STAT_BUSY;
4659 scmd->scsi_done(scmd);
4660 return 0;
4662 } while (_scsih_set_satl_pending(scmd, true));
4664 if (scmd->sc_data_direction == DMA_FROM_DEVICE)
4665 mpi_control = MPI2_SCSIIO_CONTROL_READ;
4666 else if (scmd->sc_data_direction == DMA_TO_DEVICE)
4667 mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
4668 else
4669 mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
4671 /* set tags */
4672 mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
4673 /* NCQ Prio supported, make sure control indicates high priority */
4674 if (sas_device_priv_data->ncq_prio_enable) {
4675 class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
4676 if (class == IOPRIO_CLASS_RT)
4677 mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
4679 /* Make sure Device is not raid volume.
4680 * We do not expose raid functionality to upper layer for warpdrive.
4682 if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
4683 && !scsih_is_nvme(&scmd->device->sdev_gendev))
4684 && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
4685 mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
4687 smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
4688 if (!smid) {
4689 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4690 _scsih_set_satl_pending(scmd, false);
4691 goto out;
4693 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4694 memset(mpi_request, 0, ioc->request_sz);
4695 _scsih_setup_eedp(ioc, scmd, mpi_request);
4697 if (scmd->cmd_len == 32)
4698 mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
4699 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4700 if (sas_device_priv_data->sas_target->flags &
4701 MPT_TARGET_FLAGS_RAID_COMPONENT)
4702 mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
4703 else
4704 mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
4705 mpi_request->DevHandle = cpu_to_le16(handle);
4706 mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
4707 mpi_request->Control = cpu_to_le32(mpi_control);
4708 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
4709 mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
4710 mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
4711 mpi_request->SenseBufferLowAddress =
4712 mpt3sas_base_get_sense_buffer_dma(ioc, smid);
4713 mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
4714 int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
4715 mpi_request->LUN);
4716 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
4718 if (mpi_request->DataLength) {
4719 pcie_device = sas_target_priv_data->pcie_dev;
4720 if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
4721 mpt3sas_base_free_smid(ioc, smid);
4722 _scsih_set_satl_pending(scmd, false);
4723 goto out;
4725 } else
4726 ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
4728 raid_device = sas_target_priv_data->raid_device;
4729 if (raid_device && raid_device->direct_io_enabled)
4730 mpt3sas_setup_direct_io(ioc, scmd,
4731 raid_device, mpi_request);
4733 if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
4734 if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
4735 mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
4736 MPI25_SCSIIO_IOFLAGS_FAST_PATH);
4737 mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
4738 } else
4739 ioc->put_smid_scsi_io(ioc, smid,
4740 le16_to_cpu(mpi_request->DevHandle));
4741 } else
4742 mpt3sas_base_put_smid_default(ioc, smid);
4743 return 0;
4745 out:
4746 return SCSI_MLQUEUE_HOST_BUSY;
4750 * _scsih_normalize_sense - normalize descriptor and fixed format sense data
4751 * @sense_buffer: sense data returned by target
4752 * @data: normalized skey/asc/ascq
4754 static void
4755 _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
4757 if ((sense_buffer[0] & 0x7F) >= 0x72) {
4758 /* descriptor format */
4759 data->skey = sense_buffer[1] & 0x0F;
4760 data->asc = sense_buffer[2];
4761 data->ascq = sense_buffer[3];
4762 } else {
4763 /* fixed format */
4764 data->skey = sense_buffer[2] & 0x0F;
4765 data->asc = sense_buffer[12];
4766 data->ascq = sense_buffer[13];
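/*
 * Offsets used above, for reference: descriptor-format sense (response
 * codes 0x72/0x73) carries key/asc/ascq in bytes 1-3, while fixed format
 * (0x70/0x71) carries them in bytes 2, 12 and 13.
 */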
4771 * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request
4772 * @ioc: per adapter object
4773 * @scmd: pointer to scsi command object
4774 * @mpi_reply: reply mf payload returned from firmware
4775 * @smid: system request message index
4777 * scsi_status - SCSI Status code returned from target device
4778 * scsi_state - state info associated with SCSI_IO determined by ioc
4779 * ioc_status - ioc supplied status info
4781 static void
4782 _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
4783 Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
4785 u32 response_info;
4786 u8 *response_bytes;
4787 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
4788 MPI2_IOCSTATUS_MASK;
4789 u8 scsi_state = mpi_reply->SCSIState;
4790 u8 scsi_status = mpi_reply->SCSIStatus;
4791 char *desc_ioc_state = NULL;
4792 char *desc_scsi_status = NULL;
4793 char *desc_scsi_state = ioc->tmp_string;
4794 u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
4795 struct _sas_device *sas_device = NULL;
4796 struct _pcie_device *pcie_device = NULL;
4797 struct scsi_target *starget = scmd->device->sdev_target;
4798 struct MPT3SAS_TARGET *priv_target = starget->hostdata;
4799 char *device_str = NULL;
4801 if (!priv_target)
4802 return;
4803 if (ioc->hide_ir_msg)
4804 device_str = "WarpDrive";
4805 else
4806 device_str = "volume";
4808 if (log_info == 0x31170000)
4809 return;
4811 switch (ioc_status) {
4812 case MPI2_IOCSTATUS_SUCCESS:
4813 desc_ioc_state = "success";
4814 break;
4815 case MPI2_IOCSTATUS_INVALID_FUNCTION:
4816 desc_ioc_state = "invalid function";
4817 break;
4818 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
4819 desc_ioc_state = "scsi recovered error";
4820 break;
4821 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
4822 desc_ioc_state = "scsi invalid dev handle";
4823 break;
4824 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
4825 desc_ioc_state = "scsi device not there";
4826 break;
4827 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
4828 desc_ioc_state = "scsi data overrun";
4829 break;
4830 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
4831 desc_ioc_state = "scsi data underrun";
4832 break;
4833 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
4834 desc_ioc_state = "scsi io data error";
4835 break;
4836 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
4837 desc_ioc_state = "scsi protocol error";
4838 break;
4839 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
4840 desc_ioc_state = "scsi task terminated";
4841 break;
4842 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
4843 desc_ioc_state = "scsi residual mismatch";
4844 break;
4845 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
4846 desc_ioc_state = "scsi task mgmt failed";
4847 break;
4848 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
4849 desc_ioc_state = "scsi ioc terminated";
4850 break;
4851 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
4852 desc_ioc_state = "scsi ext terminated";
4853 break;
4854 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
4855 desc_ioc_state = "eedp guard error";
4856 break;
4857 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
4858 desc_ioc_state = "eedp ref tag error";
4859 break;
4860 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
4861 desc_ioc_state = "eedp app tag error";
4862 break;
4863 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
4864 desc_ioc_state = "insufficient power";
4865 break;
4866 default:
4867 desc_ioc_state = "unknown";
4868 break;
4871 switch (scsi_status) {
4872 case MPI2_SCSI_STATUS_GOOD:
4873 desc_scsi_status = "good";
4874 break;
4875 case MPI2_SCSI_STATUS_CHECK_CONDITION:
4876 desc_scsi_status = "check condition";
4877 break;
4878 case MPI2_SCSI_STATUS_CONDITION_MET:
4879 desc_scsi_status = "condition met";
4880 break;
4881 case MPI2_SCSI_STATUS_BUSY:
4882 desc_scsi_status = "busy";
4883 break;
4884 case MPI2_SCSI_STATUS_INTERMEDIATE:
4885 desc_scsi_status = "intermediate";
4886 break;
4887 case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
4888 desc_scsi_status = "intermediate condmet";
4889 break;
4890 case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
4891 desc_scsi_status = "reservation conflict";
4892 break;
4893 case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
4894 desc_scsi_status = "command terminated";
4895 break;
4896 case MPI2_SCSI_STATUS_TASK_SET_FULL:
4897 desc_scsi_status = "task set full";
4898 break;
4899 case MPI2_SCSI_STATUS_ACA_ACTIVE:
4900 desc_scsi_status = "aca active";
4901 break;
4902 case MPI2_SCSI_STATUS_TASK_ABORTED:
4903 desc_scsi_status = "task aborted";
4904 break;
4905 default:
4906 desc_scsi_status = "unknown";
4907 break;
4910 desc_scsi_state[0] = '\0';
4911 if (!scsi_state)
4912 desc_scsi_state = " ";
4913 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
4914 strcat(desc_scsi_state, "response info ");
4915 if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
4916 strcat(desc_scsi_state, "state terminated ");
4917 if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
4918 strcat(desc_scsi_state, "no status ");
4919 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
4920 strcat(desc_scsi_state, "autosense failed ");
4921 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
4922 strcat(desc_scsi_state, "autosense valid ");
4924 scsi_print_command(scmd);
4926 if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
4927 ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
4928 device_str, (u64)priv_target->sas_address);
4929 } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
4930 pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
4931 if (pcie_device) {
4932 ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
4933 (u64)pcie_device->wwid, pcie_device->port_num);
4934 if (pcie_device->enclosure_handle != 0)
4935 ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
4936 (u64)pcie_device->enclosure_logical_id,
4937 pcie_device->slot);
4938 if (pcie_device->connector_name[0])
4939 ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
4940 pcie_device->enclosure_level,
4941 pcie_device->connector_name);
4942 pcie_device_put(pcie_device);
4944 } else {
4945 sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
4946 if (sas_device) {
4947 ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
4948 (u64)sas_device->sas_address, sas_device->phy);
4950 _scsih_display_enclosure_chassis_info(ioc, sas_device,
4951 NULL, NULL);
4953 sas_device_put(sas_device);
4957 ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
4958 le16_to_cpu(mpi_reply->DevHandle),
4959 desc_ioc_state, ioc_status, smid);
4960 ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
4961 scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
4962 ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
4963 le16_to_cpu(mpi_reply->TaskTag),
4964 le32_to_cpu(mpi_reply->TransferCount), scmd->result);
4965 ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
4966 desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
4968 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
4969 struct sense_info data;
4970 _scsih_normalize_sense(scmd->sense_buffer, &data);
4971 ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
4972 data.skey, data.asc, data.ascq,
4973 le32_to_cpu(mpi_reply->SenseCount));
4975 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
4976 response_info = le32_to_cpu(mpi_reply->ResponseInfo);
4977 response_bytes = (u8 *)&response_info;
4978 _scsih_response_code(ioc, response_bytes[0]);
4983 * _scsih_turn_on_pfa_led - illuminate PFA LED
4984 * @ioc: per adapter object
4985 * @handle: device handle
4986 * Context: process
4988 static void
4989 _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
4991 Mpi2SepReply_t mpi_reply;
4992 Mpi2SepRequest_t mpi_request;
4993 struct _sas_device *sas_device;
4995 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
4996 if (!sas_device)
4997 return;
4999 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5000 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5001 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5002 mpi_request.SlotStatus =
5003 cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
5004 mpi_request.DevHandle = cpu_to_le16(handle);
5005 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
5006 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5007 &mpi_request)) != 0) {
5008 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5009 __FILE__, __LINE__, __func__);
5010 goto out;
5012 sas_device->pfa_led_on = 1;
5014 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5015 dewtprintk(ioc,
5016 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5017 le16_to_cpu(mpi_reply.IOCStatus),
5018 le32_to_cpu(mpi_reply.IOCLogInfo)));
5019 goto out;
5021 out:
5022 sas_device_put(sas_device);
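/*
 * Note the addressing difference between the two SEP helpers: the LED is
 * turned on by device handle, while _scsih_turn_off_pfa_led() below uses
 * enclosure handle plus slot, presumably because the device handle may no
 * longer be valid by the time the LED is cleared on device removal.
 */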
5026 * _scsih_turn_off_pfa_led - turn off Fault LED
5027 * @ioc: per adapter object
5028 * @sas_device: sas device whose PFA LED has to turned off
5029 * Context: process
5031 static void
5032 _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
5033 struct _sas_device *sas_device)
5035 Mpi2SepReply_t mpi_reply;
5036 Mpi2SepRequest_t mpi_request;
5038 memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
5039 mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
5040 mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
5041 mpi_request.SlotStatus = 0;
5042 mpi_request.Slot = cpu_to_le16(sas_device->slot);
5043 mpi_request.DevHandle = 0;
5044 mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
5045 mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
5046 if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
5047 &mpi_request)) != 0) {
5048 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5049 __FILE__, __LINE__, __func__);
5050 return;
5053 if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
5054 dewtprintk(ioc,
5055 ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
5056 le16_to_cpu(mpi_reply.IOCStatus),
5057 le32_to_cpu(mpi_reply.IOCLogInfo)));
5058 return;
5063 * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
5064 * @ioc: per adapter object
5065 * @handle: device handle
5066 * Context: interrupt.
5068 static void
5069 _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5071 struct fw_event_work *fw_event;
5073 fw_event = alloc_fw_event_work(0);
5074 if (!fw_event)
5075 return;
5076 fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
5077 fw_event->device_handle = handle;
5078 fw_event->ioc = ioc;
5079 _scsih_fw_event_add(ioc, fw_event);
5080 fw_event_work_put(fw_event);
5084 * _scsih_smart_predicted_fault - process smart errors
5085 * @ioc: per adapter object
5086 * @handle: device handle
5087 * Context: interrupt.
5089 static void
5090 _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5092 struct scsi_target *starget;
5093 struct MPT3SAS_TARGET *sas_target_priv_data;
5094 Mpi2EventNotificationReply_t *event_reply;
5095 Mpi2EventDataSasDeviceStatusChange_t *event_data;
5096 struct _sas_device *sas_device;
5097 ssize_t sz;
5098 unsigned long flags;
5100 /* only handle non-raid devices */
5101 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5102 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
5103 if (!sas_device)
5104 goto out_unlock;
5106 starget = sas_device->starget;
5107 sas_target_priv_data = starget->hostdata;
5109 if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
5110 ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
5111 goto out_unlock;
5113 _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
5115 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5117 if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
5118 _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
5120 /* insert into event log */
5121 sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
5122 sizeof(Mpi2EventDataSasDeviceStatusChange_t);
5123 event_reply = kzalloc(sz, GFP_KERNEL);
5124 if (!event_reply) {
5125 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5126 __FILE__, __LINE__, __func__);
5127 goto out;
5130 event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
5131 event_reply->Event =
5132 cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
5133 event_reply->MsgLength = sz/4;
5134 event_reply->EventDataLength =
5135 cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
5136 event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
5137 event_reply->EventData;
5138 event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
5139 event_data->ASC = 0x5D;
5140 event_data->DevHandle = cpu_to_le16(handle);
5141 event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
5142 mpt3sas_ctl_add_to_event_log(ioc, event_reply);
5143 kfree(event_reply);
5144 out:
5145 if (sas_device)
5146 sas_device_put(sas_device);
5147 return;
5149 out_unlock:
5150 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5151 goto out;
5155 * _scsih_io_done - scsi request callback
5156 * @ioc: per adapter object
5157 * @smid: system request message index
5158 * @msix_index: MSIX table index supplied by the OS
5159 * @reply: reply message frame(lower 32bit addr)
5161 * Callback handler when using scsih_qcmd.
5163 * Return: 1 meaning mf should be freed from _base_interrupt
5164 * 0 means the mf is freed from this function.
5166 static u8
5167 _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5169 Mpi25SCSIIORequest_t *mpi_request;
5170 Mpi2SCSIIOReply_t *mpi_reply;
5171 struct scsi_cmnd *scmd;
5172 struct scsiio_tracker *st;
5173 u16 ioc_status;
5174 u32 xfer_cnt;
5175 u8 scsi_state;
5176 u8 scsi_status;
5177 u32 log_info;
5178 struct MPT3SAS_DEVICE *sas_device_priv_data;
5179 u32 response_code = 0;
5181 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5183 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
5184 if (scmd == NULL)
5185 return 1;
5187 _scsih_set_satl_pending(scmd, false);
5189 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
5191 if (mpi_reply == NULL) {
5192 scmd->result = DID_OK << 16;
5193 goto out;
5196 sas_device_priv_data = scmd->device->hostdata;
5197 if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
5198 sas_device_priv_data->sas_target->deleted) {
5199 scmd->result = DID_NO_CONNECT << 16;
5200 goto out;
5202 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
5205 * WARPDRIVE: If direct_io is set then it is directIO,
5206 * the failed direct I/O should be redirected to volume
5208 st = scsi_cmd_priv(scmd);
5209 if (st->direct_io &&
5210 ((ioc_status & MPI2_IOCSTATUS_MASK)
5211 != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
5212 st->direct_io = 0;
5213 memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
5214 mpi_request->DevHandle =
5215 cpu_to_le16(sas_device_priv_data->sas_target->handle);
5216 ioc->put_smid_scsi_io(ioc, smid,
5217 sas_device_priv_data->sas_target->handle);
5218 return 0;
5220 /* turning off TLR */
5221 scsi_state = mpi_reply->SCSIState;
5222 if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
5223 response_code =
5224 le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
5225 if (!sas_device_priv_data->tlr_snoop_check) {
5226 sas_device_priv_data->tlr_snoop_check++;
5227 if ((!ioc->is_warpdrive &&
5228 !scsih_is_raid(&scmd->device->sdev_gendev) &&
5229 !scsih_is_nvme(&scmd->device->sdev_gendev))
5230 && sas_is_tlr_enabled(scmd->device) &&
5231 response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
5232 sas_disable_tlr(scmd->device);
5233 sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
5237 xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
5238 scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
5239 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
5240 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
5241 else
5242 log_info = 0;
5243 ioc_status &= MPI2_IOCSTATUS_MASK;
5244 scsi_status = mpi_reply->SCSIStatus;
5246 if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
5247 (scsi_status == MPI2_SCSI_STATUS_BUSY ||
5248 scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
5249 scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
5250 ioc_status = MPI2_IOCSTATUS_SUCCESS;
5253 if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5254 struct sense_info data;
5255 const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
5256 smid);
5257 u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
5258 le32_to_cpu(mpi_reply->SenseCount));
5259 memcpy(scmd->sense_buffer, sense_data, sz);
5260 _scsih_normalize_sense(scmd->sense_buffer, &data);
5261 /* failure prediction threshold exceeded */
5262 if (data.asc == 0x5D)
5263 _scsih_smart_predicted_fault(ioc,
5264 le16_to_cpu(mpi_reply->DevHandle));
5265 mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
5267 if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
5268 ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
5269 (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
5270 (scmd->sense_buffer[2] == HARDWARE_ERROR)))
5271 _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
5273 switch (ioc_status) {
5274 case MPI2_IOCSTATUS_BUSY:
5275 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5276 scmd->result = SAM_STAT_BUSY;
5277 break;
5279 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5280 scmd->result = DID_NO_CONNECT << 16;
5281 break;
5283 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5284 if (sas_device_priv_data->block) {
5285 scmd->result = DID_TRANSPORT_DISRUPTED << 16;
5286 goto out;
5288 if (log_info == 0x31110630) {
5289 if (scmd->retries > 2) {
5290 scmd->result = DID_NO_CONNECT << 16;
5291 scsi_device_set_state(scmd->device,
5292 SDEV_OFFLINE);
5293 } else {
5294 scmd->result = DID_SOFT_ERROR << 16;
5295 scmd->device->expecting_cc_ua = 1;
5297 break;
5298 } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
5299 scmd->result = DID_RESET << 16;
5300 break;
5301 } else if ((scmd->device->channel == RAID_CHANNEL) &&
5302 (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
5303 MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
5304 scmd->result = DID_RESET << 16;
5305 break;
5307 scmd->result = DID_SOFT_ERROR << 16;
5308 break;
5309 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5310 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5311 scmd->result = DID_RESET << 16;
5312 break;
5314 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5315 if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
5316 scmd->result = DID_SOFT_ERROR << 16;
5317 else
5318 scmd->result = (DID_OK << 16) | scsi_status;
5319 break;
5321 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5322 scmd->result = (DID_OK << 16) | scsi_status;
5324 if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
5325 break;
5327 if (xfer_cnt < scmd->underflow) {
5328 if (scsi_status == SAM_STAT_BUSY)
5329 scmd->result = SAM_STAT_BUSY;
5330 else
5331 scmd->result = DID_SOFT_ERROR << 16;
5332 } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5333 MPI2_SCSI_STATE_NO_SCSI_STATUS))
5334 scmd->result = DID_SOFT_ERROR << 16;
5335 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5336 scmd->result = DID_RESET << 16;
5337 else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
5338 mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
5339 mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
5340 scmd->result = (DRIVER_SENSE << 24) |
5341 SAM_STAT_CHECK_CONDITION;
5342 scmd->sense_buffer[0] = 0x70;
5343 scmd->sense_buffer[2] = ILLEGAL_REQUEST;
5344 scmd->sense_buffer[12] = 0x20;
5345 scmd->sense_buffer[13] = 0;
5347 break;
5349 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5350 scsi_set_resid(scmd, 0);
5351 /* fall through */
5352 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
5353 case MPI2_IOCSTATUS_SUCCESS:
5354 scmd->result = (DID_OK << 16) | scsi_status;
5355 if (response_code ==
5356 MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
5357 (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
5358 MPI2_SCSI_STATE_NO_SCSI_STATUS)))
5359 scmd->result = DID_SOFT_ERROR << 16;
5360 else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
5361 scmd->result = DID_RESET << 16;
5362 break;
5364 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
5365 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
5366 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
5367 _scsih_eedp_error_handling(scmd, ioc_status);
5368 break;
5370 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5371 case MPI2_IOCSTATUS_INVALID_FUNCTION:
5372 case MPI2_IOCSTATUS_INVALID_SGL:
5373 case MPI2_IOCSTATUS_INTERNAL_ERROR:
5374 case MPI2_IOCSTATUS_INVALID_FIELD:
5375 case MPI2_IOCSTATUS_INVALID_STATE:
5376 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5377 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
5378 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
5379 default:
5380 scmd->result = DID_SOFT_ERROR << 16;
5381 break;
5385 if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
5386 _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
5388 out:
5390 scsi_dma_unmap(scmd);
5391 mpt3sas_base_free_smid(ioc, smid);
5392 scmd->scsi_done(scmd);
5393 return 0;
5397 * _scsih_sas_host_refresh - refreshing sas host object contents
5398 * @ioc: per adapter object
5399 * Context: user
5401 * During port enable, fw will send topology events for every device. It's
5402 * possible that the handles may change from the previous setting, so this
5403 * code keeps the handles updated if they change.
5405 static void
5406 _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
5408 u16 sz;
5409 u16 ioc_status;
5410 int i;
5411 Mpi2ConfigReply_t mpi_reply;
5412 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5413 u16 attached_handle;
5414 u8 link_rate;
5416 dtmprintk(ioc,
5417 ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
5418 (u64)ioc->sas_hba.sas_address));
5420 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
5421 * sizeof(Mpi2SasIOUnit0PhyData_t));
5422 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5423 if (!sas_iounit_pg0) {
5424 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5425 __FILE__, __LINE__, __func__);
5426 return;
5429 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5430 sas_iounit_pg0, sz)) != 0)
5431 goto out;
5432 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5433 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5434 goto out;
5435 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5436 link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
5437 if (i == 0)
5438 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5439 PhyData[0].ControllerDevHandle);
5440 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5441 attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
5442 AttachedDevHandle);
5443 if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
5444 link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
5445 mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
5446 attached_handle, i, link_rate);
5448 out:
5449 kfree(sas_iounit_pg0);
5453 * _scsih_sas_host_add - create sas host object
5454 * @ioc: per adapter object
5456 * Creating host side data object, stored in ioc->sas_hba
5458 static void
5459 _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
5461 int i;
5462 Mpi2ConfigReply_t mpi_reply;
5463 Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
5464 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
5465 Mpi2SasPhyPage0_t phy_pg0;
5466 Mpi2SasDevicePage0_t sas_device_pg0;
5467 Mpi2SasEnclosurePage0_t enclosure_pg0;
5468 u16 ioc_status;
5469 u16 sz;
5470 u8 device_missing_delay;
5471 u8 num_phys;
5473 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
5474 if (!num_phys) {
5475 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5476 __FILE__, __LINE__, __func__);
5477 return;
5479 ioc->sas_hba.phy = kcalloc(num_phys,
5480 sizeof(struct _sas_phy), GFP_KERNEL);
5481 if (!ioc->sas_hba.phy) {
5482 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5483 __FILE__, __LINE__, __func__);
5484 goto out;
5486 ioc->sas_hba.num_phys = num_phys;
5488 /* sas_iounit page 0 */
5489 sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
5490 sizeof(Mpi2SasIOUnit0PhyData_t));
5491 sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
5492 if (!sas_iounit_pg0) {
5493 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5494 __FILE__, __LINE__, __func__);
5495 return;
5497 if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
5498 sas_iounit_pg0, sz))) {
5499 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5500 __FILE__, __LINE__, __func__);
5501 goto out;
5503 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5504 MPI2_IOCSTATUS_MASK;
5505 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5506 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5507 __FILE__, __LINE__, __func__);
5508 goto out;
5511 /* sas_iounit page 1 */
5512 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
5513 sizeof(Mpi2SasIOUnit1PhyData_t));
5514 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
5515 if (!sas_iounit_pg1) {
5516 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5517 __FILE__, __LINE__, __func__);
5518 goto out;
5520 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5521 sas_iounit_pg1, sz))) {
5522 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5523 __FILE__, __LINE__, __func__);
5524 goto out;
5526 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5527 MPI2_IOCSTATUS_MASK;
5528 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5529 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5530 __FILE__, __LINE__, __func__);
5531 goto out;
5534 ioc->io_missing_delay =
5535 sas_iounit_pg1->IODeviceMissingDelay;
5536 device_missing_delay =
5537 sas_iounit_pg1->ReportDeviceMissingDelay;
5538 if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
5539 ioc->device_missing_delay = (device_missing_delay &
5540 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
5541 else
5542 ioc->device_missing_delay = device_missing_delay &
5543 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
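/*
 * Worked example (illustrative; assumes the MPI2 encoding where the
 * UNIT_16 flag is the top bit and the timeout mask covers the remaining
 * bits): a ReportDeviceMissingDelay of 0x85 has UNIT_16 set, giving a
 * delay of (0x85 & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16 =
 * 80 seconds, whereas 0x05 simply means 5 seconds.
 */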
5545 ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
5546 for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
5547 if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
5548 i))) {
5549 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5550 __FILE__, __LINE__, __func__);
5551 goto out;
5553 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5554 MPI2_IOCSTATUS_MASK;
5555 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5556 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5557 __FILE__, __LINE__, __func__);
5558 goto out;
5561 if (i == 0)
5562 ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
5563 PhyData[0].ControllerDevHandle);
5564 ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
5565 ioc->sas_hba.phy[i].phy_id = i;
5566 mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
5567 phy_pg0, ioc->sas_hba.parent_dev);
5569 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5570 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
5571 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5572 __FILE__, __LINE__, __func__);
5573 goto out;
5575 ioc->sas_hba.enclosure_handle =
5576 le16_to_cpu(sas_device_pg0.EnclosureHandle);
5577 ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5578 ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5579 ioc->sas_hba.handle,
5580 (u64)ioc->sas_hba.sas_address,
5581 ioc->sas_hba.num_phys);
5583 if (ioc->sas_hba.enclosure_handle) {
5584 if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
5585 &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
5586 ioc->sas_hba.enclosure_handle)))
5587 ioc->sas_hba.enclosure_logical_id =
5588 le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
5591 out:
5592 kfree(sas_iounit_pg1);
5593 kfree(sas_iounit_pg0);
5597 * _scsih_expander_add - creating expander object
5598 * @ioc: per adapter object
5599 * @handle: expander handle
5601 * Creating expander object, stored in ioc->sas_expander_list.
5603 * Return: 0 for success, else error.
5605 static int
5606 _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
5608 struct _sas_node *sas_expander;
5609 struct _enclosure_node *enclosure_dev;
5610 Mpi2ConfigReply_t mpi_reply;
5611 Mpi2ExpanderPage0_t expander_pg0;
5612 Mpi2ExpanderPage1_t expander_pg1;
5613 u32 ioc_status;
5614 u16 parent_handle;
5615 u64 sas_address, sas_address_parent = 0;
5616 int i;
5617 unsigned long flags;
5618 struct _sas_port *mpt3sas_port = NULL;
5620 int rc = 0;
5622 if (!handle)
5623 return -1;
5625 if (ioc->shost_recovery || ioc->pci_error_recovery)
5626 return -1;
5628 if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
5629 MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
5630 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5631 __FILE__, __LINE__, __func__);
5632 return -1;
5635 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
5636 MPI2_IOCSTATUS_MASK;
5637 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5638 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5639 __FILE__, __LINE__, __func__);
5640 return -1;
5643 /* handle out of order topology events */
5644 parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
5645 if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
5646 != 0) {
5647 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5648 __FILE__, __LINE__, __func__);
5649 return -1;
5651 if (sas_address_parent != ioc->sas_hba.sas_address) {
5652 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5653 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5654 sas_address_parent);
5655 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5656 if (!sas_expander) {
5657 rc = _scsih_expander_add(ioc, parent_handle);
5658 if (rc != 0)
5659 return rc;
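/*
 * Note on the recursion above: topology events can arrive out of order,
 * so the parent expander may not have been added yet. When the parent's
 * SAS address is not the HBA's and no matching expander object exists,
 * _scsih_expander_add() is first called for parent_handle so the parent
 * object is present before this expander's port is created below.
 */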
5663 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5664 sas_address = le64_to_cpu(expander_pg0.SASAddress);
5665 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5666 sas_address);
5667 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5669 if (sas_expander)
5670 return 0;
5672 sas_expander = kzalloc(sizeof(struct _sas_node),
5673 GFP_KERNEL);
5674 if (!sas_expander) {
5675 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5676 __FILE__, __LINE__, __func__);
5677 return -1;
5680 sas_expander->handle = handle;
5681 sas_expander->num_phys = expander_pg0.NumPhys;
5682 sas_expander->sas_address_parent = sas_address_parent;
5683 sas_expander->sas_address = sas_address;
5685 ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
5686 handle, parent_handle,
5687 (u64)sas_expander->sas_address, sas_expander->num_phys);
5689 if (!sas_expander->num_phys)
5690 goto out_fail;
5691 sas_expander->phy = kcalloc(sas_expander->num_phys,
5692 sizeof(struct _sas_phy), GFP_KERNEL);
5693 if (!sas_expander->phy) {
5694 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5695 __FILE__, __LINE__, __func__);
5696 rc = -1;
5697 goto out_fail;
5700 INIT_LIST_HEAD(&sas_expander->sas_port_list);
5701 mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
5702 sas_address_parent);
5703 if (!mpt3sas_port) {
5704 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5705 __FILE__, __LINE__, __func__);
5706 rc = -1;
5707 goto out_fail;
5709 sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
5711 for (i = 0 ; i < sas_expander->num_phys ; i++) {
5712 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
5713 &expander_pg1, i, handle))) {
5714 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5715 __FILE__, __LINE__, __func__);
5716 rc = -1;
5717 goto out_fail;
5719 sas_expander->phy[i].handle = handle;
5720 sas_expander->phy[i].phy_id = i;
5722 if ((mpt3sas_transport_add_expander_phy(ioc,
5723 &sas_expander->phy[i], expander_pg1,
5724 sas_expander->parent_dev))) {
5725 ioc_err(ioc, "failure at %s:%d/%s()!\n",
5726 __FILE__, __LINE__, __func__);
5727 rc = -1;
5728 goto out_fail;
5732 if (sas_expander->enclosure_handle) {
5733 enclosure_dev =
5734 mpt3sas_scsih_enclosure_find_by_handle(ioc,
5735 sas_expander->enclosure_handle);
5736 if (enclosure_dev)
5737 sas_expander->enclosure_logical_id =
5738 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5741 _scsih_expander_node_add(ioc, sas_expander);
5742 return 0;
5744 out_fail:
5746 if (mpt3sas_port)
5747 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
5748 sas_address_parent);
5749 kfree(sas_expander);
5750 return rc;
5754 * mpt3sas_expander_remove - removing expander object
5755 * @ioc: per adapter object
5756 * @sas_address: expander sas_address
5758 void
5759 mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
5761 struct _sas_node *sas_expander;
5762 unsigned long flags;
5764 if (ioc->shost_recovery)
5765 return;
5767 spin_lock_irqsave(&ioc->sas_node_lock, flags);
5768 sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
5769 sas_address);
5770 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
5771 if (sas_expander)
5772 _scsih_expander_node_remove(ioc, sas_expander);
5776 * _scsih_done - internal SCSI_IO callback handler.
5777 * @ioc: per adapter object
5778 * @smid: system request message index
5779 * @msix_index: MSIX table index supplied by the OS
5780 * @reply: reply message frame(lower 32bit addr)
5782 * Callback handler when sending internal generated SCSI_IO.
5783 * The callback index passed is `ioc->scsih_cb_idx`
5785 * Return: 1 meaning mf should be freed from _base_interrupt
5786 * 0 means the mf is freed from this function.
5788 static u8
5789 _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
5791 MPI2DefaultReply_t *mpi_reply;
5793 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
5794 if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
5795 return 1;
5796 if (ioc->scsih_cmds.smid != smid)
5797 return 1;
5798 ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
5799 if (mpi_reply) {
5800 memcpy(ioc->scsih_cmds.reply, mpi_reply,
5801 mpi_reply->MsgLength*4);
5802 ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
5804 ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
5805 complete(&ioc->scsih_cmds.done);
5806 return 1;
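/*
 * Typical caller pattern (sketch, not taken from this excerpt): an
 * internal SCSI_IO issued through ioc->scsih_cmds sets MPT3_CMD_PENDING,
 * fires the request, then waits on ioc->scsih_cmds.done; once this
 * callback runs, the caller checks MPT3_CMD_COMPLETE and, if
 * MPT3_CMD_REPLY_VALID is set, parses the reply frame copied into
 * ioc->scsih_cmds.reply above.
 */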
5812 #define MPT3_MAX_LUNS (255)
5816 * _scsih_check_access_status - check access flags
5817 * @ioc: per adapter object
5818 * @sas_address: sas address
5819 * @handle: sas device handle
5820 * @access_status: errors returned during discovery of the device
5822 * Return: 0 for success, else failure
5824 static u8
5825 _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
5826 u16 handle, u8 access_status)
5828 u8 rc = 1;
5829 char *desc = NULL;
5831 switch (access_status) {
5832 case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
5833 case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
5834 rc = 0;
5835 break;
5836 case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
5837 desc = "sata capability failed";
5838 break;
5839 case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
5840 desc = "sata affiliation conflict";
5841 break;
5842 case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
5843 desc = "route not addressable";
5844 break;
5845 case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
5846 desc = "smp error not addressable";
5847 break;
5848 case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
5849 desc = "device blocked";
5850 break;
5851 case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
5852 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
5853 case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
5854 case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
5855 case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
5856 case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
5857 case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
5858 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
5859 case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
5860 case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
5861 case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
5862 case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
5863 desc = "sata initialization failed";
5864 break;
5865 default:
5866 desc = "unknown";
5867 break;
5870 if (!rc)
5871 return 0;
5873 ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
5874 desc, (u64)sas_address, handle);
5875 return rc;
5879 * _scsih_check_device - checking device responsiveness
5880 * @ioc: per adapter object
5881 * @parent_sas_address: sas address of parent expander or sas host
5882 * @handle: attached device handle
5883 * @phy_number: phy number
5884 * @link_rate: new link rate
5886 static void
5887 _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
5888 u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
5890 Mpi2ConfigReply_t mpi_reply;
5891 Mpi2SasDevicePage0_t sas_device_pg0;
5892 struct _sas_device *sas_device;
5893 struct _enclosure_node *enclosure_dev = NULL;
5894 u32 ioc_status;
5895 unsigned long flags;
5896 u64 sas_address;
5897 struct scsi_target *starget;
5898 struct MPT3SAS_TARGET *sas_target_priv_data;
5899 u32 device_info;
5901 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
5902 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
5903 return;
5905 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
5906 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
5907 return;
5909 /* wide port handling: we only need to handle the device once, for the
5910 * phy that matches the one reported in sas device page zero
5912 if (phy_number != sas_device_pg0.PhyNum)
5913 return;
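/*
 * Example (illustrative): a 4-lane wide port raises a topology entry for
 * each of its phys, all pointing at the same attached device handle. The
 * device is processed only for the phy number that SAS Device Page 0
 * itself reports, so the other entries return here without doing
 * duplicate work.
 */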
5915 /* check if this is end device */
5916 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
5917 if (!(_scsih_is_end_device(device_info)))
5918 return;
5920 spin_lock_irqsave(&ioc->sas_device_lock, flags);
5921 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
5922 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
5923 sas_address);
5925 if (!sas_device)
5926 goto out_unlock;
5928 if (unlikely(sas_device->handle != handle)) {
5929 starget = sas_device->starget;
5930 sas_target_priv_data = starget->hostdata;
5931 starget_printk(KERN_INFO, starget,
5932 "handle changed from(0x%04x) to (0x%04x)!!!\n",
5933 sas_device->handle, handle);
5934 sas_target_priv_data->handle = handle;
5935 sas_device->handle = handle;
5936 if (le16_to_cpu(sas_device_pg0.Flags) &
5937 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
5938 sas_device->enclosure_level =
5939 sas_device_pg0.EnclosureLevel;
5940 memcpy(sas_device->connector_name,
5941 sas_device_pg0.ConnectorName, 4);
5942 sas_device->connector_name[4] = '\0';
5943 } else {
5944 sas_device->enclosure_level = 0;
5945 sas_device->connector_name[0] = '\0';
5948 sas_device->enclosure_handle =
5949 le16_to_cpu(sas_device_pg0.EnclosureHandle);
5950 sas_device->is_chassis_slot_valid = 0;
5951 enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
5952 sas_device->enclosure_handle);
5953 if (enclosure_dev) {
5954 sas_device->enclosure_logical_id =
5955 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
5956 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
5957 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
5958 sas_device->is_chassis_slot_valid = 1;
5959 sas_device->chassis_slot =
5960 enclosure_dev->pg0.ChassisSlot;
5965 /* check if device is present */
5966 if (!(le16_to_cpu(sas_device_pg0.Flags) &
5967 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
5968 ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
5969 handle);
5970 goto out_unlock;
5973 /* check if there were any issues with discovery */
5974 if (_scsih_check_access_status(ioc, sas_address, handle,
5975 sas_device_pg0.AccessStatus))
5976 goto out_unlock;
5978 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5979 _scsih_ublock_io_device(ioc, sas_address);
5981 if (sas_device)
5982 sas_device_put(sas_device);
5983 return;
5985 out_unlock:
5986 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
5987 if (sas_device)
5988 sas_device_put(sas_device);
5992 * _scsih_add_device - creating sas device object
5993 * @ioc: per adapter object
5994 * @handle: sas device handle
5995 * @phy_num: phy number end device attached to
5996 * @is_pd: is this hidden raid component
5998 * Creating end device object, stored in ioc->sas_device_list.
6000 * Return: 0 for success, non-zero for failure.
6002 static int
6003 _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
6004 u8 is_pd)
6006 Mpi2ConfigReply_t mpi_reply;
6007 Mpi2SasDevicePage0_t sas_device_pg0;
6008 struct _sas_device *sas_device;
6009 struct _enclosure_node *enclosure_dev = NULL;
6010 u32 ioc_status;
6011 u64 sas_address;
6012 u32 device_info;
6014 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
6015 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
6016 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6017 __FILE__, __LINE__, __func__);
6018 return -1;
6021 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6022 MPI2_IOCSTATUS_MASK;
6023 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6024 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6025 __FILE__, __LINE__, __func__);
6026 return -1;
6029 /* check if this is end device */
6030 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
6031 if (!(_scsih_is_end_device(device_info)))
6032 return -1;
6033 set_bit(handle, ioc->pend_os_device_add);
6034 sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
6036 /* check if device is present */
6037 if (!(le16_to_cpu(sas_device_pg0.Flags) &
6038 MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
6039 ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
6040 handle);
6041 return -1;
6044 /* check if there were any issues with discovery */
6045 if (_scsih_check_access_status(ioc, sas_address, handle,
6046 sas_device_pg0.AccessStatus))
6047 return -1;
6049 sas_device = mpt3sas_get_sdev_by_addr(ioc,
6050 sas_address);
6051 if (sas_device) {
6052 clear_bit(handle, ioc->pend_os_device_add);
6053 sas_device_put(sas_device);
6054 return -1;
6057 if (sas_device_pg0.EnclosureHandle) {
6058 enclosure_dev =
6059 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6060 le16_to_cpu(sas_device_pg0.EnclosureHandle));
6061 if (enclosure_dev == NULL)
6062 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match any enclosure device!\n",
6063 le16_to_cpu(sas_device_pg0.EnclosureHandle));
6066 sas_device = kzalloc(sizeof(struct _sas_device),
6067 GFP_KERNEL);
6068 if (!sas_device) {
6069 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6070 __FILE__, __LINE__, __func__);
6071 return 0;
6074 kref_init(&sas_device->refcount);
6075 sas_device->handle = handle;
6076 if (_scsih_get_sas_address(ioc,
6077 le16_to_cpu(sas_device_pg0.ParentDevHandle),
6078 &sas_device->sas_address_parent) != 0)
6079 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6080 __FILE__, __LINE__, __func__);
6081 sas_device->enclosure_handle =
6082 le16_to_cpu(sas_device_pg0.EnclosureHandle);
6083 if (sas_device->enclosure_handle != 0)
6084 sas_device->slot =
6085 le16_to_cpu(sas_device_pg0.Slot);
6086 sas_device->device_info = device_info;
6087 sas_device->sas_address = sas_address;
6088 sas_device->phy = sas_device_pg0.PhyNum;
6089 sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
6090 MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6092 if (le16_to_cpu(sas_device_pg0.Flags)
6093 & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
6094 sas_device->enclosure_level =
6095 sas_device_pg0.EnclosureLevel;
6096 memcpy(sas_device->connector_name,
6097 sas_device_pg0.ConnectorName, 4);
6098 sas_device->connector_name[4] = '\0';
6099 } else {
6100 sas_device->enclosure_level = 0;
6101 sas_device->connector_name[0] = '\0';
6103 /* get enclosure_logical_id & chassis_slot*/
6104 sas_device->is_chassis_slot_valid = 0;
6105 if (enclosure_dev) {
6106 sas_device->enclosure_logical_id =
6107 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6108 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
6109 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
6110 sas_device->is_chassis_slot_valid = 1;
6111 sas_device->chassis_slot =
6112 enclosure_dev->pg0.ChassisSlot;
6116 /* get device name */
6117 sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
6119 if (ioc->wait_for_discovery_to_complete)
6120 _scsih_sas_device_init_add(ioc, sas_device);
6121 else
6122 _scsih_sas_device_add(ioc, sas_device);
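/*
 * Note (interpretation of the two helpers above): while initial discovery
 * is still running, the device is parked on the init list and announced
 * to the SCSI midlayer later in one batch; otherwise it is added and
 * exposed immediately.
 */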
6124 sas_device_put(sas_device);
6125 return 0;
6129 * _scsih_remove_device - removing sas device object
6130 * @ioc: per adapter object
6131 * @sas_device: the sas_device object
6133 static void
6134 _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
6135 struct _sas_device *sas_device)
6137 struct MPT3SAS_TARGET *sas_target_priv_data;
6139 if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
6140 (sas_device->pfa_led_on)) {
6141 _scsih_turn_off_pfa_led(ioc, sas_device);
6142 sas_device->pfa_led_on = 0;
6145 dewtprintk(ioc,
6146 ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
6147 __func__,
6148 sas_device->handle, (u64)sas_device->sas_address));
6150 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6151 NULL, NULL));
6153 if (sas_device->starget && sas_device->starget->hostdata) {
6154 sas_target_priv_data = sas_device->starget->hostdata;
6155 sas_target_priv_data->deleted = 1;
6156 _scsih_ublock_io_device(ioc, sas_device->sas_address);
6157 sas_target_priv_data->handle =
6158 MPT3SAS_INVALID_DEVICE_HANDLE;
6161 if (!ioc->hide_drives)
6162 mpt3sas_transport_port_remove(ioc,
6163 sas_device->sas_address,
6164 sas_device->sas_address_parent);
6166 ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
6167 sas_device->handle, (u64)sas_device->sas_address);
6169 _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
6171 dewtprintk(ioc,
6172 ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
6173 __func__,
6174 sas_device->handle, (u64)sas_device->sas_address));
6175 dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
6176 NULL, NULL));
6180 * _scsih_sas_topology_change_event_debug - debug for topology event
6181 * @ioc: per adapter object
6182 * @event_data: event data payload
6183 * Context: user.
6185 static void
6186 _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6187 Mpi2EventDataSasTopologyChangeList_t *event_data)
6189 int i;
6190 u16 handle;
6191 u16 reason_code;
6192 u8 phy_number;
6193 char *status_str = NULL;
6194 u8 link_rate, prev_link_rate;
6196 switch (event_data->ExpStatus) {
6197 case MPI2_EVENT_SAS_TOPO_ES_ADDED:
6198 status_str = "add";
6199 break;
6200 case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
6201 status_str = "remove";
6202 break;
6203 case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
6204 case 0:
6205 status_str = "responding";
6206 break;
6207 case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
6208 status_str = "remove delay";
6209 break;
6210 default:
6211 status_str = "unknown status";
6212 break;
6214 ioc_info(ioc, "sas topology change: (%s)\n", status_str);
6215 pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
6216 "start_phy(%02d), count(%d)\n",
6217 le16_to_cpu(event_data->ExpanderDevHandle),
6218 le16_to_cpu(event_data->EnclosureHandle),
6219 event_data->StartPhyNum, event_data->NumEntries);
6220 for (i = 0; i < event_data->NumEntries; i++) {
6221 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6222 if (!handle)
6223 continue;
6224 phy_number = event_data->StartPhyNum + i;
6225 reason_code = event_data->PHY[i].PhyStatus &
6226 MPI2_EVENT_SAS_TOPO_RC_MASK;
6227 switch (reason_code) {
6228 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6229 status_str = "target add";
6230 break;
6231 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6232 status_str = "target remove";
6233 break;
6234 case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
6235 status_str = "delay target remove";
6236 break;
6237 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6238 status_str = "link rate change";
6239 break;
6240 case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
6241 status_str = "target responding";
6242 break;
6243 default:
6244 status_str = "unknown";
6245 break;
6247 link_rate = event_data->PHY[i].LinkRate >> 4;
6248 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
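/*
 * Illustrative decode (assuming the usual MPI2 rate codes, e.g. 0x08 =
 * 1.5 Gbps, 0x09 = 3.0 Gbps, 0x0A = 6.0 Gbps): a LinkRate byte of 0xA9
 * means the phy renegotiated from 3.0 Gbps (old rate, low nibble) to
 * 6.0 Gbps (new rate, high nibble).
 */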
6249 pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
6250 " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
6251 handle, status_str, link_rate, prev_link_rate);
6257 * _scsih_sas_topology_change_event - handle topology changes
6258 * @ioc: per adapter object
6259 * @fw_event: The fw_event_work object
6260 * Context: user.
6263 static int
6264 _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6265 struct fw_event_work *fw_event)
6267 int i;
6268 u16 parent_handle, handle;
6269 u16 reason_code;
6270 u8 phy_number, max_phys;
6271 struct _sas_node *sas_expander;
6272 u64 sas_address;
6273 unsigned long flags;
6274 u8 link_rate, prev_link_rate;
6275 Mpi2EventDataSasTopologyChangeList_t *event_data =
6276 (Mpi2EventDataSasTopologyChangeList_t *)
6277 fw_event->event_data;
6279 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6280 _scsih_sas_topology_change_event_debug(ioc, event_data);
6282 if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
6283 return 0;
6285 if (!ioc->sas_hba.num_phys)
6286 _scsih_sas_host_add(ioc);
6287 else
6288 _scsih_sas_host_refresh(ioc);
6290 if (fw_event->ignore) {
6291 dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
6292 return 0;
6295 parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
6297 /* handle expander add */
6298 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
6299 if (_scsih_expander_add(ioc, parent_handle) != 0)
6300 return 0;
6302 spin_lock_irqsave(&ioc->sas_node_lock, flags);
6303 sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
6304 parent_handle);
6305 if (sas_expander) {
6306 sas_address = sas_expander->sas_address;
6307 max_phys = sas_expander->num_phys;
6308 } else if (parent_handle < ioc->sas_hba.num_phys) {
6309 sas_address = ioc->sas_hba.sas_address;
6310 max_phys = ioc->sas_hba.num_phys;
6311 } else {
6312 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6313 return 0;
6315 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
6317 /* handle siblings events */
6318 for (i = 0; i < event_data->NumEntries; i++) {
6319 if (fw_event->ignore) {
6320 dewtprintk(ioc,
6321 ioc_info(ioc, "ignoring expander event\n"));
6322 return 0;
6324 if (ioc->remove_host || ioc->pci_error_recovery)
6325 return 0;
6326 phy_number = event_data->StartPhyNum + i;
6327 if (phy_number >= max_phys)
6328 continue;
6329 reason_code = event_data->PHY[i].PhyStatus &
6330 MPI2_EVENT_SAS_TOPO_RC_MASK;
6331 if ((event_data->PHY[i].PhyStatus &
6332 MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
6333 MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
6334 continue;
6335 handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
6336 if (!handle)
6337 continue;
6338 link_rate = event_data->PHY[i].LinkRate >> 4;
6339 prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
6340 switch (reason_code) {
6341 case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
6343 if (ioc->shost_recovery)
6344 break;
6346 if (link_rate == prev_link_rate)
6347 break;
6349 mpt3sas_transport_update_links(ioc, sas_address,
6350 handle, phy_number, link_rate);
6352 if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
6353 break;
6355 _scsih_check_device(ioc, sas_address, handle,
6356 phy_number, link_rate);
6358 if (!test_bit(handle, ioc->pend_os_device_add))
6359 break;
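/*
 * If the handle is still marked as pending an OS-level add after
 * _scsih_check_device(), the link-rate change is treated as a device
 * arrival: control deliberately falls through to the TARG_ADDED case
 * below so _scsih_add_device() gets another chance to register it.
 */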
6361 /* fall through */
6363 case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
6365 if (ioc->shost_recovery)
6366 break;
6368 mpt3sas_transport_update_links(ioc, sas_address,
6369 handle, phy_number, link_rate);
6371 _scsih_add_device(ioc, handle, phy_number, 0);
6373 break;
6374 case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
6376 _scsih_device_remove_by_handle(ioc, handle);
6377 break;
6381 /* handle expander removal */
6382 if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
6383 sas_expander)
6384 mpt3sas_expander_remove(ioc, sas_address);
6386 return 0;
6390 * _scsih_sas_device_status_change_event_debug - debug for device event
6391 * @ioc: per adapter object
6392 * @event_data: event data payload
6393 * Context: user.
6395 static void
6396 _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6397 Mpi2EventDataSasDeviceStatusChange_t *event_data)
6399 char *reason_str = NULL;
6401 switch (event_data->ReasonCode) {
6402 case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
6403 reason_str = "smart data";
6404 break;
6405 case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
6406 reason_str = "unsupported device discovered";
6407 break;
6408 case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
6409 reason_str = "internal device reset";
6410 break;
6411 case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
6412 reason_str = "internal task abort";
6413 break;
6414 case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
6415 reason_str = "internal abort task set";
6416 break;
6417 case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
6418 reason_str = "internal clear task set";
6419 break;
6420 case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
6421 reason_str = "internal query task";
6422 break;
6423 case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
6424 reason_str = "sata init failure";
6425 break;
6426 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
6427 reason_str = "internal device reset complete";
6428 break;
6429 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
6430 reason_str = "internal task abort complete";
6431 break;
6432 case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
6433 reason_str = "internal async notification";
6434 break;
6435 case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
6436 reason_str = "expander reduced functionality";
6437 break;
6438 case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
6439 reason_str = "expander reduced functionality complete";
6440 break;
6441 default:
6442 reason_str = "unknown reason";
6443 break;
6445 ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
6446 reason_str, le16_to_cpu(event_data->DevHandle),
6447 (u64)le64_to_cpu(event_data->SASAddress),
6448 le16_to_cpu(event_data->TaskTag));
6449 if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
6450 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
6451 event_data->ASC, event_data->ASCQ);
6452 pr_cont("\n");
6456 * _scsih_sas_device_status_change_event - handle device status change
6457 * @ioc: per adapter object
6458 * @fw_event: The fw_event_work object
6459 * Context: user.
6461 static void
6462 _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
6463 struct fw_event_work *fw_event)
6465 struct MPT3SAS_TARGET *target_priv_data;
6466 struct _sas_device *sas_device;
6467 u64 sas_address;
6468 unsigned long flags;
6469 Mpi2EventDataSasDeviceStatusChange_t *event_data =
6470 (Mpi2EventDataSasDeviceStatusChange_t *)
6471 fw_event->event_data;
6473 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6474 _scsih_sas_device_status_change_event_debug(ioc,
6475 event_data);
6477 /* In MPI Revision K (0xC), the internal device reset complete was
6478 * implemented, so avoid setting tm_busy flag for older firmware.
6480 if ((ioc->facts.HeaderVersion >> 8) < 0xC)
6481 return;
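/*
 * Illustrative check (assuming the MPI revision is carried in the upper
 * byte of HeaderVersion): a HeaderVersion of 0x0B00 gives
 * (0x0B00 >> 8) = 0x0B, which is below 0xC, so the function returns here;
 * 0x0C00 or newer continues and may toggle tm_busy below.
 */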
6483 if (event_data->ReasonCode !=
6484 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
6485 event_data->ReasonCode !=
6486 MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
6487 return;
6489 spin_lock_irqsave(&ioc->sas_device_lock, flags);
6490 sas_address = le64_to_cpu(event_data->SASAddress);
6491 sas_device = __mpt3sas_get_sdev_by_addr(ioc,
6492 sas_address);
6494 if (!sas_device || !sas_device->starget)
6495 goto out;
6497 target_priv_data = sas_device->starget->hostdata;
6498 if (!target_priv_data)
6499 goto out;
6501 if (event_data->ReasonCode ==
6502 MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
6503 target_priv_data->tm_busy = 1;
6504 else
6505 target_priv_data->tm_busy = 0;
6507 out:
6508 if (sas_device)
6509 sas_device_put(sas_device);
6511 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
6516 * _scsih_check_pcie_access_status - check access flags
6517 * @ioc: per adapter object
6518 * @wwid: wwid
6519 * @handle: sas device handle
6520 * @access_status: errors returned during discovery of the device
6522 * Return: 0 for success, else failure
6524 static u8
6525 _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
6526 u16 handle, u8 access_status)
6528 u8 rc = 1;
6529 char *desc = NULL;
6531 switch (access_status) {
6532 case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
6533 case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
6534 rc = 0;
6535 break;
6536 case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
6537 desc = "PCIe device capability failed";
6538 break;
6539 case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
6540 desc = "PCIe device blocked";
6541 break;
6542 case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
6543 desc = "PCIe device mem space access failed";
6544 break;
6545 case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
6546 desc = "PCIe device unsupported";
6547 break;
6548 case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
6549 desc = "PCIe device MSIx Required";
6550 break;
6551 case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
6552 desc = "PCIe device init fail max";
6553 break;
6554 case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
6555 desc = "PCIe device status unknown";
6556 break;
6557 case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
6558 desc = "nvme ready timeout";
6559 break;
6560 case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
6561 desc = "nvme device configuration unsupported";
6562 break;
6563 case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
6564 desc = "nvme identify failed";
6565 break;
6566 case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
6567 desc = "nvme qconfig failed";
6568 break;
6569 case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
6570 desc = "nvme qcreation failed";
6571 break;
6572 case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
6573 desc = "nvme eventcfg failed";
6574 break;
6575 case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
6576 desc = "nvme get feature stat failed";
6577 break;
6578 case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
6579 desc = "nvme idle timeout";
6580 break;
6581 case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
6582 desc = "nvme failure status";
6583 break;
6584 default:
6585 ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
6586 access_status, (u64)wwid, handle);
6587 return rc;
6590 if (!rc)
6591 return rc;
6593 ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
6594 desc, (u64)wwid, handle);
6595 return rc;
6599 * _scsih_pcie_device_remove_from_sml - removing pcie device
6600 * from SML and free up associated memory
6601 * @ioc: per adapter object
6602 * @pcie_device: the pcie_device object
6604 static void
6605 _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
6606 struct _pcie_device *pcie_device)
6608 struct MPT3SAS_TARGET *sas_target_priv_data;
6610 dewtprintk(ioc,
6611 ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
6612 __func__,
6613 pcie_device->handle, (u64)pcie_device->wwid));
6614 if (pcie_device->enclosure_handle != 0)
6615 dewtprintk(ioc,
6616 ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
6617 __func__,
6618 (u64)pcie_device->enclosure_logical_id,
6619 pcie_device->slot));
6620 if (pcie_device->connector_name[0] != '\0')
6621 dewtprintk(ioc,
6622 ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
6623 __func__,
6624 pcie_device->enclosure_level,
6625 pcie_device->connector_name));
6627 if (pcie_device->starget && pcie_device->starget->hostdata) {
6628 sas_target_priv_data = pcie_device->starget->hostdata;
6629 sas_target_priv_data->deleted = 1;
6630 _scsih_ublock_io_device(ioc, pcie_device->wwid);
6631 sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
6634 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
6635 pcie_device->handle, (u64)pcie_device->wwid);
6636 if (pcie_device->enclosure_handle != 0)
6637 ioc_info(ioc, "removing: enclosure logical id(0x%016llx), slot(%d)\n",
6638 (u64)pcie_device->enclosure_logical_id,
6639 pcie_device->slot);
6640 if (pcie_device->connector_name[0] != '\0')
6641 ioc_info(ioc, "removing: enclosure level(0x%04x), connector name(%s)\n",
6642 pcie_device->enclosure_level,
6643 pcie_device->connector_name);
6645 if (pcie_device->starget)
6646 scsi_remove_target(&pcie_device->starget->dev);
6647 dewtprintk(ioc,
6648 ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
6649 __func__,
6650 pcie_device->handle, (u64)pcie_device->wwid));
6651 if (pcie_device->enclosure_handle != 0)
6652 dewtprintk(ioc,
6653 ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
6654 __func__,
6655 (u64)pcie_device->enclosure_logical_id,
6656 pcie_device->slot));
6657 if (pcie_device->connector_name[0] != '\0')
6658 dewtprintk(ioc,
6659 ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name(%s)\n",
6660 __func__,
6661 pcie_device->enclosure_level,
6662 pcie_device->connector_name));
6664 kfree(pcie_device->serial_number);
6669 * _scsih_pcie_check_device - checking device responsiveness
6670 * @ioc: per adapter object
6671 * @handle: attached device handle
6673 static void
6674 _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6676 Mpi2ConfigReply_t mpi_reply;
6677 Mpi26PCIeDevicePage0_t pcie_device_pg0;
6678 u32 ioc_status;
6679 struct _pcie_device *pcie_device;
6680 u64 wwid;
6681 unsigned long flags;
6682 struct scsi_target *starget;
6683 struct MPT3SAS_TARGET *sas_target_priv_data;
6684 u32 device_info;
6686 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6687 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
6688 return;
6690 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6691 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6692 return;
6694 /* check if this is end device */
6695 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6696 if (!(_scsih_is_nvme_device(device_info)))
6697 return;
6699 wwid = le64_to_cpu(pcie_device_pg0.WWID);
6700 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
6701 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
6703 if (!pcie_device) {
6704 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6705 return;
6708 if (unlikely(pcie_device->handle != handle)) {
6709 starget = pcie_device->starget;
6710 sas_target_priv_data = starget->hostdata;
6711 starget_printk(KERN_INFO, starget,
6712 "handle changed from(0x%04x) to (0x%04x)!!!\n",
6713 pcie_device->handle, handle);
6714 sas_target_priv_data->handle = handle;
6715 pcie_device->handle = handle;
6717 if (le32_to_cpu(pcie_device_pg0.Flags) &
6718 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6719 pcie_device->enclosure_level =
6720 pcie_device_pg0.EnclosureLevel;
6721 memcpy(&pcie_device->connector_name[0],
6722 &pcie_device_pg0.ConnectorName[0], 4);
6723 } else {
6724 pcie_device->enclosure_level = 0;
6725 pcie_device->connector_name[0] = '\0';
6729 /* check if device is present */
6730 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6731 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6732 ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
6733 handle);
6734 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6735 pcie_device_put(pcie_device);
6736 return;
6739 /* check if there were any issues with discovery */
6740 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6741 pcie_device_pg0.AccessStatus)) {
6742 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6743 pcie_device_put(pcie_device);
6744 return;
6747 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
6748 pcie_device_put(pcie_device);
6750 _scsih_ublock_io_device(ioc, wwid);
6752 return;
6756 * _scsih_pcie_add_device - creating pcie device object
6757 * @ioc: per adapter object
6758 * @handle: pcie device handle
6760 * Creating end device object, stored in ioc->pcie_device_list.
6762 * Return: 1 means queue the event later, 0 means complete the event
6764 static int
6765 _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
6767 Mpi26PCIeDevicePage0_t pcie_device_pg0;
6768 Mpi26PCIeDevicePage2_t pcie_device_pg2;
6769 Mpi2ConfigReply_t mpi_reply;
6770 struct _pcie_device *pcie_device;
6771 struct _enclosure_node *enclosure_dev;
6772 u32 ioc_status;
6773 u64 wwid;
6775 if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
6776 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
6777 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6778 __FILE__, __LINE__, __func__);
6779 return 0;
6781 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
6782 MPI2_IOCSTATUS_MASK;
6783 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6784 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6785 __FILE__, __LINE__, __func__);
6786 return 0;
6789 set_bit(handle, ioc->pend_os_device_add);
6790 wwid = le64_to_cpu(pcie_device_pg0.WWID);
6792 /* check if device is present */
6793 if (!(le32_to_cpu(pcie_device_pg0.Flags) &
6794 MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
6795 ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
6796 handle);
6797 return 0;
6800 /* check if there were any issues with discovery */
6801 if (_scsih_check_pcie_access_status(ioc, wwid, handle,
6802 pcie_device_pg0.AccessStatus))
6803 return 0;
6805 if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
6806 return 0;
6808 pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
6809 if (pcie_device) {
6810 clear_bit(handle, ioc->pend_os_device_add);
6811 pcie_device_put(pcie_device);
6812 return 0;
6815 pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
6816 if (!pcie_device) {
6817 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6818 __FILE__, __LINE__, __func__);
6819 return 0;
6822 kref_init(&pcie_device->refcount);
6823 pcie_device->id = ioc->pcie_target_id++;
6824 pcie_device->channel = PCIE_CHANNEL;
6825 pcie_device->handle = handle;
6826 pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
6827 pcie_device->wwid = wwid;
6828 pcie_device->port_num = pcie_device_pg0.PortNum;
6829 pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
6830 MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
6832 pcie_device->enclosure_handle =
6833 le16_to_cpu(pcie_device_pg0.EnclosureHandle);
6834 if (pcie_device->enclosure_handle != 0)
6835 pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
6837 if (le32_to_cpu(pcie_device_pg0.Flags) &
6838 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
6839 pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
6840 memcpy(&pcie_device->connector_name[0],
6841 &pcie_device_pg0.ConnectorName[0], 4);
6842 } else {
6843 pcie_device->enclosure_level = 0;
6844 pcie_device->connector_name[0] = '\0';
6847 /* get enclosure_logical_id */
6848 if (pcie_device->enclosure_handle) {
6849 enclosure_dev =
6850 mpt3sas_scsih_enclosure_find_by_handle(ioc,
6851 pcie_device->enclosure_handle);
6852 if (enclosure_dev)
6853 pcie_device->enclosure_logical_id =
6854 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
6856 /* TODO -- Add device name once FW supports it */
6857 if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
6858 &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
6859 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6860 __FILE__, __LINE__, __func__);
6861 kfree(pcie_device);
6862 return 0;
6865 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6866 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6867 ioc_err(ioc, "failure at %s:%d/%s()!\n",
6868 __FILE__, __LINE__, __func__);
6869 kfree(pcie_device);
6870 return 0;
6872 pcie_device->nvme_mdts =
6873 le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
6874 if (pcie_device_pg2.ControllerResetTO)
6875 pcie_device->reset_timeout =
6876 pcie_device_pg2.ControllerResetTO;
6877 else
6878 pcie_device->reset_timeout = 30;
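/*
 * Note (interpretation): ControllerResetTO comes from PCIe Device Page 2
 * and is treated as the timeout, in seconds, for resetting this NVMe
 * controller; when the firmware reports 0, a 30 second default is used.
 */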
6880 if (ioc->wait_for_discovery_to_complete)
6881 _scsih_pcie_device_init_add(ioc, pcie_device);
6882 else
6883 _scsih_pcie_device_add(ioc, pcie_device);
6885 pcie_device_put(pcie_device);
6886 return 0;
6890 * _scsih_pcie_topology_change_event_debug - debug for topology
6891 * event
6892 * @ioc: per adapter object
6893 * @event_data: event data payload
6894 * Context: user.
6896 static void
6897 _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
6898 Mpi26EventDataPCIeTopologyChangeList_t *event_data)
6900 int i;
6901 u16 handle;
6902 u16 reason_code;
6903 u8 port_number;
6904 char *status_str = NULL;
6905 u8 link_rate, prev_link_rate;
6907 switch (event_data->SwitchStatus) {
6908 case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
6909 status_str = "add";
6910 break;
6911 case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
6912 status_str = "remove";
6913 break;
6914 case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
6915 case 0:
6916 status_str = "responding";
6917 break;
6918 case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
6919 status_str = "remove delay";
6920 break;
6921 default:
6922 status_str = "unknown status";
6923 break;
6925 ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
6926 pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x) "
6927 "start_port(%02d), count(%d)\n",
6928 le16_to_cpu(event_data->SwitchDevHandle),
6929 le16_to_cpu(event_data->EnclosureHandle),
6930 event_data->StartPortNum, event_data->NumEntries);
6931 for (i = 0; i < event_data->NumEntries; i++) {
6932 handle =
6933 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
6934 if (!handle)
6935 continue;
6936 port_number = event_data->StartPortNum + i;
6937 reason_code = event_data->PortEntry[i].PortStatus;
6938 switch (reason_code) {
6939 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
6940 status_str = "target add";
6941 break;
6942 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
6943 status_str = "target remove";
6944 break;
6945 case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
6946 status_str = "delay target remove";
6947 break;
6948 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
6949 status_str = "link rate change";
6950 break;
6951 case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
6952 status_str = "target responding";
6953 break;
6954 default:
6955 status_str = "unknown";
6956 break;
6958 link_rate = event_data->PortEntry[i].CurrentPortInfo &
6959 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
6960 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
6961 MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
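/*
 * Illustrative note (assuming the MPI26_EVENT_PCIE_TOPO_PI_RATE_*
 * encoding): CurrentPortInfo and PreviousPortInfo each carry the
 * negotiated PCIe rate in their low nibble, so the masks above extract
 * the new and old rates reported by the log line below.
 */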
6962 pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
6963 " link rate: new(0x%02x), old(0x%02x)\n", port_number,
6964 handle, status_str, link_rate, prev_link_rate);
6969 * _scsih_pcie_topology_change_event - handle PCIe topology
6970 * changes
6971 * @ioc: per adapter object
6972 * @fw_event: The fw_event_work object
6973 * Context: user.
6976 static void
6977 _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
6978 struct fw_event_work *fw_event)
6980 int i;
6981 u16 handle;
6982 u16 reason_code;
6983 u8 link_rate, prev_link_rate;
6984 unsigned long flags;
6985 int rc;
6986 Mpi26EventDataPCIeTopologyChangeList_t *event_data =
6987 (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
6988 struct _pcie_device *pcie_device;
6990 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
6991 _scsih_pcie_topology_change_event_debug(ioc, event_data);
6993 if (ioc->shost_recovery || ioc->remove_host ||
6994 ioc->pci_error_recovery)
6995 return;
6997 if (fw_event->ignore) {
6998 dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
6999 return;
7002 /* handle siblings events */
7003 for (i = 0; i < event_data->NumEntries; i++) {
7004 if (fw_event->ignore) {
7005 dewtprintk(ioc,
7006 ioc_info(ioc, "ignoring switch event\n"));
7007 return;
7009 if (ioc->remove_host || ioc->pci_error_recovery)
7010 return;
7011 reason_code = event_data->PortEntry[i].PortStatus;
7012 handle =
7013 le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
7014 if (!handle)
7015 continue;
7017 link_rate = event_data->PortEntry[i].CurrentPortInfo
7018 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7019 prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
7020 & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
7022 switch (reason_code) {
7023 case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
7024 if (ioc->shost_recovery)
7025 break;
7026 if (link_rate == prev_link_rate)
7027 break;
7028 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7029 break;
7031 _scsih_pcie_check_device(ioc, handle);
7033 /* The code after this point handles the case where a
7034 * device has been added but keeps returning BUSY for
7035 * some time. Then, before the Device Missing Delay
7036 * expires and the device becomes READY, the device is
7037 * removed and added back.
7039 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7040 pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
7041 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7043 if (pcie_device) {
7044 pcie_device_put(pcie_device);
7045 break;
7048 if (!test_bit(handle, ioc->pend_os_device_add))
7049 break;
7051 dewtprintk(ioc,
7052 ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
7053 handle));
7054 event_data->PortEntry[i].PortStatus &= 0xF0;
7055 event_data->PortEntry[i].PortStatus |=
7056 MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
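/*
 * The low nibble of PortStatus is the reason code, so the two lines above
 * clear it and substitute DEV_ADDED: the entry is re-labelled as a device
 * arrival and control falls through to the DEV_ADDED case to add back the
 * device that disappeared while it was reporting BUSY.
 */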
7057 /* fall through */
7058 case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
7059 if (ioc->shost_recovery)
7060 break;
7061 if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
7062 break;
7064 rc = _scsih_pcie_add_device(ioc, handle);
7065 if (!rc) {
7066 /* mark entry vacant */
7067 /* TODO This needs to be reviewed and fixed:
7068 * we don't have an entry
7069 * to mark an event void, like vacant
7071 event_data->PortEntry[i].PortStatus |=
7072 MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
7074 break;
7075 case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
7076 _scsih_pcie_device_remove_by_handle(ioc, handle);
7077 break;
7083 * _scsih_pcie_device_status_change_event_debug - debug for device event
7084 * @ioc: per adapter object
7085 * @event_data: event data payload
7086 * Context: user.
7088 static void
7089 _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7090 Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
7092 char *reason_str = NULL;
7094 switch (event_data->ReasonCode) {
7095 case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
7096 reason_str = "smart data";
7097 break;
7098 case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
7099 reason_str = "unsupported device discovered";
7100 break;
7101 case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
7102 reason_str = "internal device reset";
7103 break;
7104 case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
7105 reason_str = "internal task abort";
7106 break;
7107 case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
7108 reason_str = "internal abort task set";
7109 break;
7110 case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
7111 reason_str = "internal clear task set";
7112 break;
7113 case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
7114 reason_str = "internal query task";
7115 break;
7116 case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
7117 reason_str = "device init failure";
7118 break;
7119 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
7120 reason_str = "internal device reset complete";
7121 break;
7122 case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
7123 reason_str = "internal task abort complete";
7124 break;
7125 case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
7126 reason_str = "internal async notification";
7127 break;
7128 case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
7129 reason_str = "pcie hot reset failed";
7130 break;
7131 default:
7132 reason_str = "unknown reason";
7133 break;
7136 ioc_info(ioc, "PCIE device status change: (%s)\n"
7137 "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
7138 reason_str, le16_to_cpu(event_data->DevHandle),
7139 (u64)le64_to_cpu(event_data->WWID),
7140 le16_to_cpu(event_data->TaskTag));
7141 if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
7142 pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
7143 event_data->ASC, event_data->ASCQ);
7144 pr_cont("\n");
7148 * _scsih_pcie_device_status_change_event - handle device status
7149 * change
7150 * @ioc: per adapter object
7151 * @fw_event: The fw_event_work object
7152 * Context: user.
7154 static void
7155 _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7156 struct fw_event_work *fw_event)
7158 struct MPT3SAS_TARGET *target_priv_data;
7159 struct _pcie_device *pcie_device;
7160 u64 wwid;
7161 unsigned long flags;
7162 Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
7163 (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
7164 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7165 _scsih_pcie_device_status_change_event_debug(ioc,
7166 event_data);
7168 if (event_data->ReasonCode !=
7169 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
7170 event_data->ReasonCode !=
7171 MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
7172 return;
7174 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
7175 wwid = le64_to_cpu(event_data->WWID);
7176 pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
7178 if (!pcie_device || !pcie_device->starget)
7179 goto out;
7181 target_priv_data = pcie_device->starget->hostdata;
7182 if (!target_priv_data)
7183 goto out;
7185 if (event_data->ReasonCode ==
7186 MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
7187 target_priv_data->tm_busy = 1;
7188 else
7189 target_priv_data->tm_busy = 0;
7190 out:
7191 if (pcie_device)
7192 pcie_device_put(pcie_device);
7194 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
7198 * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
7199 * event
7200 * @ioc: per adapter object
7201 * @event_data: event data payload
7202 * Context: user.
7204 static void
7205 _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7206 Mpi2EventDataSasEnclDevStatusChange_t *event_data)
7208 char *reason_str = NULL;
7210 switch (event_data->ReasonCode) {
7211 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7212 reason_str = "enclosure add";
7213 break;
7214 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7215 reason_str = "enclosure remove";
7216 break;
7217 default:
7218 reason_str = "unknown reason";
7219 break;
7222 ioc_info(ioc, "enclosure status change: (%s)\n"
7223 "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
7224 reason_str,
7225 le16_to_cpu(event_data->EnclosureHandle),
7226 (u64)le64_to_cpu(event_data->EnclosureLogicalID),
7227 le16_to_cpu(event_data->StartSlot));
7231 * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
7232 * @ioc: per adapter object
7233 * @fw_event: The fw_event_work object
7234 * Context: user.
7236 static void
7237 _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
7238 struct fw_event_work *fw_event)
7240 Mpi2ConfigReply_t mpi_reply;
7241 struct _enclosure_node *enclosure_dev = NULL;
7242 Mpi2EventDataSasEnclDevStatusChange_t *event_data =
7243 (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
7244 int rc;
7245 u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
7247 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
7248 _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
7249 (Mpi2EventDataSasEnclDevStatusChange_t *)
7250 fw_event->event_data);
7251 if (ioc->shost_recovery)
7252 return;
7254 if (enclosure_handle)
7255 enclosure_dev =
7256 mpt3sas_scsih_enclosure_find_by_handle(ioc,
7257 enclosure_handle);
7258 switch (event_data->ReasonCode) {
7259 case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7260 if (!enclosure_dev) {
7261 enclosure_dev =
7262 kzalloc(sizeof(struct _enclosure_node),
7263 GFP_KERNEL);
7264 if (!enclosure_dev) {
7265 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7266 __FILE__, __LINE__, __func__);
7267 return;
7269 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
7270 &enclosure_dev->pg0,
7271 MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
7272 enclosure_handle);
7274 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
7275 MPI2_IOCSTATUS_MASK)) {
7276 kfree(enclosure_dev);
7277 return;
7280 list_add_tail(&enclosure_dev->list,
7281 &ioc->enclosure_list);
7283 break;
7284 case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7285 if (enclosure_dev) {
7286 list_del(&enclosure_dev->list);
7287 kfree(enclosure_dev);
7289 break;
7290 default:
7291 break;
7296 * _scsih_sas_broadcast_primitive_event - handle broadcast events
7297 * @ioc: per adapter object
7298 * @fw_event: The fw_event_work object
7299 * Context: user.
7301 static void
7302 _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
7303 struct fw_event_work *fw_event)
7305 struct scsi_cmnd *scmd;
7306 struct scsi_device *sdev;
7307 struct scsiio_tracker *st;
7308 u16 smid, handle;
7309 u32 lun;
7310 struct MPT3SAS_DEVICE *sas_device_priv_data;
7311 u32 termination_count;
7312 u32 query_count;
7313 Mpi2SCSITaskManagementReply_t *mpi_reply;
7314 Mpi2EventDataSasBroadcastPrimitive_t *event_data =
7315 (Mpi2EventDataSasBroadcastPrimitive_t *)
7316 fw_event->event_data;
7317 u16 ioc_status;
7318 unsigned long flags;
7319 int r;
7320 u8 max_retries = 0;
7321 u8 task_abort_retries;
7323 mutex_lock(&ioc->tm_cmds.mutex);
7324 ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
7325 __func__, event_data->PhyNum, event_data->PortWidth);
7327 _scsih_block_io_all_device(ioc);
7329 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7330 mpi_reply = ioc->tm_cmds.reply;
7331 broadcast_aen_retry:
7333 /* sanity checks for retrying this loop */
7334 if (max_retries++ == 5) {
7335 dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
7336 goto out;
7337 } else if (max_retries > 1)
7338 dewtprintk(ioc,
7339 ioc_info(ioc, "%s: %d retry\n",
7340 __func__, max_retries - 1));
7342 termination_count = 0;
7343 query_count = 0;
7344 for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
7345 if (ioc->shost_recovery)
7346 goto out;
7347 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
7348 if (!scmd)
7349 continue;
7350 st = scsi_cmd_priv(scmd);
7351 sdev = scmd->device;
7352 sas_device_priv_data = sdev->hostdata;
7353 if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
7354 continue;
7355 /* skip hidden raid components */
7356 if (sas_device_priv_data->sas_target->flags &
7357 MPT_TARGET_FLAGS_RAID_COMPONENT)
7358 continue;
7359 /* skip volumes */
7360 if (sas_device_priv_data->sas_target->flags &
7361 MPT_TARGET_FLAGS_VOLUME)
7362 continue;
7363 /* skip PCIe devices */
7364 if (sas_device_priv_data->sas_target->flags &
7365 MPT_TARGET_FLAGS_PCIE_DEVICE)
7366 continue;
7368 handle = sas_device_priv_data->sas_target->handle;
7369 lun = sas_device_priv_data->lun;
7370 query_count++;
7372 if (ioc->shost_recovery)
7373 goto out;
7375 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7376 r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
7377 MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
7378 st->msix_io, 30, 0);
7379 if (r == FAILED) {
7380 sdev_printk(KERN_WARNING, sdev,
7381 "mpt3sas_scsih_issue_tm: FAILED when sending "
7382 "QUERY_TASK: scmd(%p)\n", scmd);
7383 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7384 goto broadcast_aen_retry;
7386 ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
7387 & MPI2_IOCSTATUS_MASK;
7388 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7389 sdev_printk(KERN_WARNING, sdev,
7390 "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
7391 ioc_status, scmd);
7392 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7393 goto broadcast_aen_retry;
7396 /* see if IO is still owned by IOC and target */
7397 if (mpi_reply->ResponseCode ==
7398 MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
7399 mpi_reply->ResponseCode ==
7400 MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
7401 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7402 continue;
7404 task_abort_retries = 0;
7405 tm_retry:
7406 if (task_abort_retries++ == 60) {
7407 dewtprintk(ioc,
7408 ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
7409 __func__));
7410 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7411 goto broadcast_aen_retry;
7414 if (ioc->shost_recovery)
7415 goto out_no_lock;
7417 r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
7418 MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
7419 st->msix_io, 30, 0);
7420 if (r == FAILED || st->cb_idx != 0xFF) {
7421 sdev_printk(KERN_WARNING, sdev,
7422 "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
7423 "scmd(%p)\n", scmd);
7424 goto tm_retry;
7427 if (task_abort_retries > 1)
7428 sdev_printk(KERN_WARNING, sdev,
7429 "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
7430 " scmd(%p)\n",
7431 task_abort_retries - 1, scmd);
7433 termination_count += le32_to_cpu(mpi_reply->TerminationCount);
7434 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
7437 if (ioc->broadcast_aen_pending) {
7438 dewtprintk(ioc,
7439 ioc_info(ioc,
7440 "%s: loop back due to pending AEN\n",
7441 __func__));
7442 ioc->broadcast_aen_pending = 0;
7443 goto broadcast_aen_retry;
7446 out:
7447 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
7448 out_no_lock:
7450 dewtprintk(ioc,
7451 ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
7452 __func__, query_count, termination_count));
7454 ioc->broadcast_aen_busy = 0;
7455 if (!ioc->shost_recovery)
7456 _scsih_ublock_io_all_device(ioc);
7457 mutex_unlock(&ioc->tm_cmds.mutex);
7461 * _scsih_sas_discovery_event - handle discovery events
7462 * @ioc: per adapter object
7463 * @fw_event: The fw_event_work object
7464 * Context: user.
7466 static void
7467 _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
7468 struct fw_event_work *fw_event)
7470 Mpi2EventDataSasDiscovery_t *event_data =
7471 (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
7473 if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
7474 ioc_info(ioc, "discovery event: (%s)",
7475 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
7476 "start" : "stop");
7477 if (event_data->DiscoveryStatus)
7478 pr_cont("discovery_status(0x%08x)",
7479 le32_to_cpu(event_data->DiscoveryStatus));
7480 pr_cont("\n");
7483 if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
7484 !ioc->sas_hba.num_phys) {
7485 if (disable_discovery > 0 && ioc->shost_recovery) {
7486 /* Wait for the reset to complete */
7487 while (ioc->shost_recovery)
7488 ssleep(1);
7490 _scsih_sas_host_add(ioc);
7495 * _scsih_sas_device_discovery_error_event - display SAS device discovery error
7496 * events
7497 * @ioc: per adapter object
7498 * @fw_event: The fw_event_work object
7499 * Context: user.
7501 static void
7502 _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
7503 struct fw_event_work *fw_event)
7505 Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
7506 (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
7508 switch (event_data->ReasonCode) {
7509 case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
7510 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
7511 le16_to_cpu(event_data->DevHandle),
7512 (u64)le64_to_cpu(event_data->SASAddress),
7513 event_data->PhysicalPort);
7514 break;
7515 case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
7516 ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
7517 le16_to_cpu(event_data->DevHandle),
7518 (u64)le64_to_cpu(event_data->SASAddress),
7519 event_data->PhysicalPort);
7520 break;
7521 default:
7522 break;
7527 * _scsih_pcie_enumeration_event - handle enumeration events
7528 * @ioc: per adapter object
7529 * @fw_event: The fw_event_work object
7530 * Context: user.
7532 static void
7533 _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
7534 struct fw_event_work *fw_event)
7536 Mpi26EventDataPCIeEnumeration_t *event_data =
7537 (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
7539 if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
7540 return;
7542 ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
7543 (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
7544 "started" : "completed",
7545 event_data->Flags);
7546 if (event_data->EnumerationStatus)
7547 pr_cont("enumeration_status(0x%08x)",
7548 le32_to_cpu(event_data->EnumerationStatus));
7549 pr_cont("\n");
7553 * _scsih_ir_fastpath - turn on fastpath for IR physdisk
7554 * @ioc: per adapter object
7555 * @handle: device handle for physical disk
7556 * @phys_disk_num: physical disk number
7558 * Return: 0 for success, else failure.
7560 static int
7561 _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
7563 Mpi2RaidActionRequest_t *mpi_request;
7564 Mpi2RaidActionReply_t *mpi_reply;
7565 u16 smid;
7566 u8 issue_reset = 0;
7567 int rc = 0;
7568 u16 ioc_status;
7569 u32 log_info;
7571 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
7572 return rc;
7574 mutex_lock(&ioc->scsih_cmds.mutex);
7576 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
7577 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
7578 rc = -EAGAIN;
7579 goto out;
7581 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
7583 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
7584 if (!smid) {
7585 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7586 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7587 rc = -EAGAIN;
7588 goto out;
7591 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7592 ioc->scsih_cmds.smid = smid;
7593 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
7595 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
7596 mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
7597 mpi_request->PhysDiskNum = phys_disk_num;
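/*
 * The RAID_ACTION request built above (PHYSDISK_HIDDEN for this physical
 * disk) is sent through the driver's internal scsih command context;
 * completion is waited on for up to 10 seconds before the reply status is
 * checked.
 */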
7599 dewtprintk(ioc,
7600 ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
7601 handle, phys_disk_num));
7603 init_completion(&ioc->scsih_cmds.done);
7604 mpt3sas_base_put_smid_default(ioc, smid);
7605 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
7607 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
7608 issue_reset =
7609 mpt3sas_base_check_cmd_timeout(ioc,
7610 ioc->scsih_cmds.status, mpi_request,
7611 sizeof(Mpi2RaidActionRequest_t)/4);
7612 rc = -EFAULT;
7613 goto out;
7616 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
7618 mpi_reply = ioc->scsih_cmds.reply;
7619 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
7620 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
7621 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
7622 else
7623 log_info = 0;
7624 ioc_status &= MPI2_IOCSTATUS_MASK;
7625 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7626 dewtprintk(ioc,
7627 ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
7628 ioc_status, log_info));
7629 rc = -EFAULT;
7630 } else
7631 dewtprintk(ioc,
7632 ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
7635 out:
7636 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
7637 mutex_unlock(&ioc->scsih_cmds.mutex);
7639 if (issue_reset)
7640 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7641 return rc;
7645 * _scsih_reprobe_lun - reprobing lun
7646 * @sdev: scsi device struct
7647 * @no_uld_attach: sdev->no_uld_attach flag setting
7650 static void
7651 _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
7653 sdev->no_uld_attach = no_uld_attach ? 1 : 0;
7654 sdev_printk(KERN_INFO, sdev, "%s raid component\n",
7655 sdev->no_uld_attach ? "hiding" : "exposing");
7656 WARN_ON(scsi_device_reprobe(sdev));
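/*
 * _scsih_reprobe_lun() is used by the pd hide/expose paths below: setting
 * no_uld_attach before the reprobe keeps upper level drivers such as sd
 * from binding to a hidden RAID component, while clearing it exposes the
 * device again.
 */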
7660 * _scsih_sas_volume_add - add new volume
7661 * @ioc: per adapter object
7662 * @element: IR config element data
7663 * Context: user.
7665 static void
7666 _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
7667 Mpi2EventIrConfigElement_t *element)
7669 struct _raid_device *raid_device;
7670 unsigned long flags;
7671 u64 wwid;
7672 u16 handle = le16_to_cpu(element->VolDevHandle);
7673 int rc;
7675 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
7676 if (!wwid) {
7677 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7678 __FILE__, __LINE__, __func__);
7679 return;
7682 spin_lock_irqsave(&ioc->raid_device_lock, flags);
7683 raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
7684 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7686 if (raid_device)
7687 return;
7689 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
7690 if (!raid_device) {
7691 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7692 __FILE__, __LINE__, __func__);
7693 return;
7696 raid_device->id = ioc->sas_id++;
7697 raid_device->channel = RAID_CHANNEL;
7698 raid_device->handle = handle;
7699 raid_device->wwid = wwid;
7700 _scsih_raid_device_add(ioc, raid_device);
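/*
 * While initial discovery is still in progress the volume is only recorded
 * and considered as a boot device candidate; otherwise it is registered
 * with the SCSI midlayer right away.
 */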
7701 if (!ioc->wait_for_discovery_to_complete) {
7702 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
7703 raid_device->id, 0);
7704 if (rc)
7705 _scsih_raid_device_remove(ioc, raid_device);
7706 } else {
7707 spin_lock_irqsave(&ioc->raid_device_lock, flags);
7708 _scsih_determine_boot_device(ioc, raid_device, 1);
7709 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7714 * _scsih_sas_volume_delete - delete volume
7715 * @ioc: per adapter object
7716 * @handle: volume device handle
7717 * Context: user.
7719 static void
7720 _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
7722 struct _raid_device *raid_device;
7723 unsigned long flags;
7724 struct MPT3SAS_TARGET *sas_target_priv_data;
7725 struct scsi_target *starget = NULL;
7727 spin_lock_irqsave(&ioc->raid_device_lock, flags);
7728 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
7729 if (raid_device) {
7730 if (raid_device->starget) {
7731 starget = raid_device->starget;
7732 sas_target_priv_data = starget->hostdata;
7733 sas_target_priv_data->deleted = 1;
7735 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
7736 raid_device->handle, (u64)raid_device->wwid);
7737 list_del(&raid_device->list);
7738 kfree(raid_device);
7740 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
7741 if (starget)
7742 scsi_remove_target(&starget->dev);
7746 * _scsih_sas_pd_expose - expose pd component to /dev/sdX
7747 * @ioc: per adapter object
7748 * @element: IR config element data
7749 * Context: user.
7751 static void
7752 _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
7753 Mpi2EventIrConfigElement_t *element)
7755 struct _sas_device *sas_device;
7756 struct scsi_target *starget = NULL;
7757 struct MPT3SAS_TARGET *sas_target_priv_data;
7758 unsigned long flags;
7759 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7761 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7762 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7763 if (sas_device) {
7764 sas_device->volume_handle = 0;
7765 sas_device->volume_wwid = 0;
7766 clear_bit(handle, ioc->pd_handles);
7767 if (sas_device->starget && sas_device->starget->hostdata) {
7768 starget = sas_device->starget;
7769 sas_target_priv_data = starget->hostdata;
7770 sas_target_priv_data->flags &=
7771 ~MPT_TARGET_FLAGS_RAID_COMPONENT;
7774 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7775 if (!sas_device)
7776 return;
7778 /* exposing raid component */
7779 if (starget)
7780 starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
7782 sas_device_put(sas_device);
7786 * _scsih_sas_pd_hide - hide pd component from /dev/sdX
7787 * @ioc: per adapter object
7788 * @element: IR config element data
7789 * Context: user.
7791 static void
7792 _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
7793 Mpi2EventIrConfigElement_t *element)
7795 struct _sas_device *sas_device;
7796 struct scsi_target *starget = NULL;
7797 struct MPT3SAS_TARGET *sas_target_priv_data;
7798 unsigned long flags;
7799 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7800 u16 volume_handle = 0;
7801 u64 volume_wwid = 0;
7803 mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
7804 if (volume_handle)
7805 mpt3sas_config_get_volume_wwid(ioc, volume_handle,
7806 &volume_wwid);
7808 spin_lock_irqsave(&ioc->sas_device_lock, flags);
7809 sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
7810 if (sas_device) {
7811 set_bit(handle, ioc->pd_handles);
7812 if (sas_device->starget && sas_device->starget->hostdata) {
7813 starget = sas_device->starget;
7814 sas_target_priv_data = starget->hostdata;
7815 sas_target_priv_data->flags |=
7816 MPT_TARGET_FLAGS_RAID_COMPONENT;
7817 sas_device->volume_handle = volume_handle;
7818 sas_device->volume_wwid = volume_wwid;
7821 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
7822 if (!sas_device)
7823 return;
7825 /* hiding raid component */
7826 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7828 if (starget)
7829 starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
7831 sas_device_put(sas_device);
7835 * _scsih_sas_pd_delete - delete pd component
7836 * @ioc: per adapter object
7837 * @element: IR config element data
7838 * Context: user.
7840 static void
7841 _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
7842 Mpi2EventIrConfigElement_t *element)
7844 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7846 _scsih_device_remove_by_handle(ioc, handle);
7850 * _scsih_sas_pd_add - add pd component
7851 * @ioc: per adapter object
7852 * @element: IR config element data
7853 * Context: user.
7855 static void
7856 _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
7857 Mpi2EventIrConfigElement_t *element)
7859 struct _sas_device *sas_device;
7860 u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
7861 Mpi2ConfigReply_t mpi_reply;
7862 Mpi2SasDevicePage0_t sas_device_pg0;
7863 u32 ioc_status;
7864 u64 sas_address;
7865 u16 parent_handle;
7867 set_bit(handle, ioc->pd_handles);
7869 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
7870 if (sas_device) {
7871 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7872 sas_device_put(sas_device);
7873 return;
7876 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
7877 MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
7878 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7879 __FILE__, __LINE__, __func__);
7880 return;
7883 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
7884 MPI2_IOCSTATUS_MASK;
7885 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
7886 ioc_err(ioc, "failure at %s:%d/%s()!\n",
7887 __FILE__, __LINE__, __func__);
7888 return;
7891 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
7892 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
7893 mpt3sas_transport_update_links(ioc, sas_address, handle,
7894 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
7896 _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
7897 _scsih_add_device(ioc, handle, 0, 1);
7901 * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
7902 * @ioc: per adapter object
7903 * @event_data: event data payload
7904 * Context: user.
7906 static void
7907 _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
7908 Mpi2EventDataIrConfigChangeList_t *event_data)
7910 Mpi2EventIrConfigElement_t *element;
7911 u8 element_type;
7912 int i;
7913 char *reason_str = NULL, *element_str = NULL;
7915 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
7917 ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
7918 le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
7919 "foreign" : "native",
7920 event_data->NumElements);
7921 for (i = 0; i < event_data->NumElements; i++, element++) {
7922 switch (element->ReasonCode) {
7923 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7924 reason_str = "add";
7925 break;
7926 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7927 reason_str = "remove";
7928 break;
7929 case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
7930 reason_str = "no change";
7931 break;
7932 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7933 reason_str = "hide";
7934 break;
7935 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7936 reason_str = "unhide";
7937 break;
7938 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7939 reason_str = "volume_created";
7940 break;
7941 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7942 reason_str = "volume_deleted";
7943 break;
7944 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7945 reason_str = "pd_created";
7946 break;
7947 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7948 reason_str = "pd_deleted";
7949 break;
7950 default:
7951 reason_str = "unknown reason";
7952 break;
7954 element_type = le16_to_cpu(element->ElementFlags) &
7955 MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
7956 switch (element_type) {
7957 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
7958 element_str = "volume";
7959 break;
7960 case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
7961 element_str = "phys disk";
7962 break;
7963 case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
7964 element_str = "hot spare";
7965 break;
7966 default:
7967 element_str = "unknown element";
7968 break;
7970 pr_info("\t(%s:%s), vol handle(0x%04x), " \
7971 "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
7972 reason_str, le16_to_cpu(element->VolDevHandle),
7973 le16_to_cpu(element->PhysDiskDevHandle),
7974 element->PhysDiskNum);
7979 * _scsih_sas_ir_config_change_event - handle ir configuration change events
7980 * @ioc: per adapter object
7981 * @fw_event: The fw_event_work object
7982 * Context: user.
7984 static void
7985 _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
7986 struct fw_event_work *fw_event)
7988 Mpi2EventIrConfigElement_t *element;
7989 int i;
7990 u8 foreign_config;
7991 Mpi2EventDataIrConfigChangeList_t *event_data =
7992 (Mpi2EventDataIrConfigChangeList_t *)
7993 fw_event->event_data;
7995 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
7996 (!ioc->hide_ir_msg))
7997 _scsih_sas_ir_config_change_event_debug(ioc, event_data);
7999 foreign_config = (le32_to_cpu(event_data->Flags) &
8000 MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
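/*
 * While host recovery is in progress on a controller that is not of the
 * MPI2.0 generation, only the fast path is re-enabled for hidden physical
 * disks and the rest of the event is ignored; volume add/delete below is
 * only performed for native (non-foreign) configurations.
 */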
8002 element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
8003 if (ioc->shost_recovery &&
8004 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8005 for (i = 0; i < event_data->NumElements; i++, element++) {
8006 if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
8007 _scsih_ir_fastpath(ioc,
8008 le16_to_cpu(element->PhysDiskDevHandle),
8009 element->PhysDiskNum);
8011 return;
8014 for (i = 0; i < event_data->NumElements; i++, element++) {
8016 switch (element->ReasonCode) {
8017 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
8018 case MPI2_EVENT_IR_CHANGE_RC_ADDED:
8019 if (!foreign_config)
8020 _scsih_sas_volume_add(ioc, element);
8021 break;
8022 case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
8023 case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
8024 if (!foreign_config)
8025 _scsih_sas_volume_delete(ioc,
8026 le16_to_cpu(element->VolDevHandle));
8027 break;
8028 case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
8029 if (!ioc->is_warpdrive)
8030 _scsih_sas_pd_hide(ioc, element);
8031 break;
8032 case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
8033 if (!ioc->is_warpdrive)
8034 _scsih_sas_pd_expose(ioc, element);
8035 break;
8036 case MPI2_EVENT_IR_CHANGE_RC_HIDE:
8037 if (!ioc->is_warpdrive)
8038 _scsih_sas_pd_add(ioc, element);
8039 break;
8040 case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
8041 if (!ioc->is_warpdrive)
8042 _scsih_sas_pd_delete(ioc, element);
8043 break;
8049 * _scsih_sas_ir_volume_event - IR volume event
8050 * @ioc: per adapter object
8051 * @fw_event: The fw_event_work object
8052 * Context: user.
8054 static void
8055 _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
8056 struct fw_event_work *fw_event)
8058 u64 wwid;
8059 unsigned long flags;
8060 struct _raid_device *raid_device;
8061 u16 handle;
8062 u32 state;
8063 int rc;
8064 Mpi2EventDataIrVolume_t *event_data =
8065 (Mpi2EventDataIrVolume_t *) fw_event->event_data;
8067 if (ioc->shost_recovery)
8068 return;
8070 if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
8071 return;
8073 handle = le16_to_cpu(event_data->VolDevHandle);
8074 state = le32_to_cpu(event_data->NewValue);
8075 if (!ioc->hide_ir_msg)
8076 dewtprintk(ioc,
8077 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8078 __func__, handle,
8079 le32_to_cpu(event_data->PreviousValue),
8080 state));
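/*
 * MISSING/FAILED volumes are deleted from the topology; a volume reported
 * ONLINE, DEGRADED or OPTIMAL is created and added to the SCSI midlayer if
 * it is not already tracked.
 */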
8081 switch (state) {
8082 case MPI2_RAID_VOL_STATE_MISSING:
8083 case MPI2_RAID_VOL_STATE_FAILED:
8084 _scsih_sas_volume_delete(ioc, handle);
8085 break;
8087 case MPI2_RAID_VOL_STATE_ONLINE:
8088 case MPI2_RAID_VOL_STATE_DEGRADED:
8089 case MPI2_RAID_VOL_STATE_OPTIMAL:
8091 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8092 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8093 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8095 if (raid_device)
8096 break;
8098 mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
8099 if (!wwid) {
8100 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8101 __FILE__, __LINE__, __func__);
8102 break;
8105 raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
8106 if (!raid_device) {
8107 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8108 __FILE__, __LINE__, __func__);
8109 break;
8112 raid_device->id = ioc->sas_id++;
8113 raid_device->channel = RAID_CHANNEL;
8114 raid_device->handle = handle;
8115 raid_device->wwid = wwid;
8116 _scsih_raid_device_add(ioc, raid_device);
8117 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
8118 raid_device->id, 0);
8119 if (rc)
8120 _scsih_raid_device_remove(ioc, raid_device);
8121 break;
8123 case MPI2_RAID_VOL_STATE_INITIALIZING:
8124 default:
8125 break;
8130 * _scsih_sas_ir_physical_disk_event - PD event
8131 * @ioc: per adapter object
8132 * @fw_event: The fw_event_work object
8133 * Context: user.
8135 static void
8136 _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
8137 struct fw_event_work *fw_event)
8139 u16 handle, parent_handle;
8140 u32 state;
8141 struct _sas_device *sas_device;
8142 Mpi2ConfigReply_t mpi_reply;
8143 Mpi2SasDevicePage0_t sas_device_pg0;
8144 u32 ioc_status;
8145 Mpi2EventDataIrPhysicalDisk_t *event_data =
8146 (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
8147 u64 sas_address;
8149 if (ioc->shost_recovery)
8150 return;
8152 if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
8153 return;
8155 handle = le16_to_cpu(event_data->PhysDiskDevHandle);
8156 state = le32_to_cpu(event_data->NewValue);
8158 if (!ioc->hide_ir_msg)
8159 dewtprintk(ioc,
8160 ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
8161 __func__, handle,
8162 le32_to_cpu(event_data->PreviousValue),
8163 state));
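/*
 * Any active physical disk state (online, degraded, rebuilding, optimal,
 * hot spare) marks the handle in pd_handles (except on WarpDrive) and, if
 * the device is not already known, reads SAS Device Page 0 and adds it as
 * a hidden RAID component.
 */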
8165 switch (state) {
8166 case MPI2_RAID_PD_STATE_ONLINE:
8167 case MPI2_RAID_PD_STATE_DEGRADED:
8168 case MPI2_RAID_PD_STATE_REBUILDING:
8169 case MPI2_RAID_PD_STATE_OPTIMAL:
8170 case MPI2_RAID_PD_STATE_HOT_SPARE:
8172 if (!ioc->is_warpdrive)
8173 set_bit(handle, ioc->pd_handles);
8175 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
8176 if (sas_device) {
8177 sas_device_put(sas_device);
8178 return;
8181 if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8182 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
8183 handle))) {
8184 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8185 __FILE__, __LINE__, __func__);
8186 return;
8189 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8190 MPI2_IOCSTATUS_MASK;
8191 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8192 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8193 __FILE__, __LINE__, __func__);
8194 return;
8197 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
8198 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
8199 mpt3sas_transport_update_links(ioc, sas_address, handle,
8200 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
8202 _scsih_add_device(ioc, handle, 0, 1);
8204 break;
8206 case MPI2_RAID_PD_STATE_OFFLINE:
8207 case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
8208 case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
8209 default:
8210 break;
8215 * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
8216 * @ioc: per adapter object
8217 * @event_data: event data payload
8218 * Context: user.
8220 static void
8221 _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
8222 Mpi2EventDataIrOperationStatus_t *event_data)
8224 char *reason_str = NULL;
8226 switch (event_data->RAIDOperation) {
8227 case MPI2_EVENT_IR_RAIDOP_RESYNC:
8228 reason_str = "resync";
8229 break;
8230 case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8231 reason_str = "online capacity expansion";
8232 break;
8233 case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8234 reason_str = "consistency check";
8235 break;
8236 case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
8237 reason_str = "background init";
8238 break;
8239 case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
8240 reason_str = "make data consistent";
8241 break;
8244 if (!reason_str)
8245 return;
8247 ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
8248 reason_str,
8249 le16_to_cpu(event_data->VolDevHandle),
8250 event_data->PercentComplete);
8254 * _scsih_sas_ir_operation_status_event - handle RAID operation events
8255 * @ioc: per adapter object
8256 * @fw_event: The fw_event_work object
8257 * Context: user.
8259 static void
8260 _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
8261 struct fw_event_work *fw_event)
8263 Mpi2EventDataIrOperationStatus_t *event_data =
8264 (Mpi2EventDataIrOperationStatus_t *)
8265 fw_event->event_data;
8266 static struct _raid_device *raid_device;
8267 unsigned long flags;
8268 u16 handle;
8270 if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
8271 (!ioc->hide_ir_msg))
8272 _scsih_sas_ir_operation_status_event_debug(ioc,
8273 event_data);
8275 /* code added for raid transport support */
8276 if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
8278 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8279 handle = le16_to_cpu(event_data->VolDevHandle);
8280 raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
8281 if (raid_device)
8282 raid_device->percent_complete =
8283 event_data->PercentComplete;
8284 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8289 * _scsih_prep_device_scan - initialize parameters prior to device scan
8290 * @ioc: per adapter object
8292 * Set the deleted flag prior to device scan. If the device is found during
8293 * the scan, then we clear the deleted flag.
8295 static void
8296 _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
8298 struct MPT3SAS_DEVICE *sas_device_priv_data;
8299 struct scsi_device *sdev;
8301 shost_for_each_device(sdev, ioc->shost) {
8302 sas_device_priv_data = sdev->hostdata;
8303 if (sas_device_priv_data && sas_device_priv_data->sas_target)
8304 sas_device_priv_data->sas_target->deleted = 1;
8309 * _scsih_mark_responding_sas_device - mark a sas_device as responding
8310 * @ioc: per adapter object
8311 * @sas_device_pg0: SAS Device page 0
8313 * After host reset, find out whether devices are still responding.
8314 * Used in _scsih_remove_unresponding_devices.
8316 static void
8317 _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
8318 Mpi2SasDevicePage0_t *sas_device_pg0)
8320 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8321 struct scsi_target *starget;
8322 struct _sas_device *sas_device = NULL;
8323 struct _enclosure_node *enclosure_dev = NULL;
8324 unsigned long flags;
8326 if (sas_device_pg0->EnclosureHandle) {
8327 enclosure_dev =
8328 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8329 le16_to_cpu(sas_device_pg0->EnclosureHandle));
8330 if (enclosure_dev == NULL)
8331 ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
8332 sas_device_pg0->EnclosureHandle);
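/*
 * Devices are matched by SAS address and slot rather than by handle, since
 * device handles can change across a host reset; on a match the stored
 * handle and enclosure data are refreshed and the device is flagged as
 * responding.
 */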
8334 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8335 list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
8336 if ((sas_device->sas_address == le64_to_cpu(
8337 sas_device_pg0->SASAddress)) && (sas_device->slot ==
8338 le16_to_cpu(sas_device_pg0->Slot))) {
8339 sas_device->responding = 1;
8340 starget = sas_device->starget;
8341 if (starget && starget->hostdata) {
8342 sas_target_priv_data = starget->hostdata;
8343 sas_target_priv_data->tm_busy = 0;
8344 sas_target_priv_data->deleted = 0;
8345 } else
8346 sas_target_priv_data = NULL;
8347 if (starget) {
8348 starget_printk(KERN_INFO, starget,
8349 "handle(0x%04x), sas_addr(0x%016llx)\n",
8350 le16_to_cpu(sas_device_pg0->DevHandle),
8351 (unsigned long long)
8352 sas_device->sas_address);
8354 if (sas_device->enclosure_handle != 0)
8355 starget_printk(KERN_INFO, starget,
8356 "enclosure logical id(0x%016llx),"
8357 " slot(%d)\n",
8358 (unsigned long long)
8359 sas_device->enclosure_logical_id,
8360 sas_device->slot);
8362 if (le16_to_cpu(sas_device_pg0->Flags) &
8363 MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
8364 sas_device->enclosure_level =
8365 sas_device_pg0->EnclosureLevel;
8366 memcpy(&sas_device->connector_name[0],
8367 &sas_device_pg0->ConnectorName[0], 4);
8368 } else {
8369 sas_device->enclosure_level = 0;
8370 sas_device->connector_name[0] = '\0';
8373 sas_device->enclosure_handle =
8374 le16_to_cpu(sas_device_pg0->EnclosureHandle);
8375 sas_device->is_chassis_slot_valid = 0;
8376 if (enclosure_dev) {
8377 sas_device->enclosure_logical_id = le64_to_cpu(
8378 enclosure_dev->pg0.EnclosureLogicalID);
8379 if (le16_to_cpu(enclosure_dev->pg0.Flags) &
8380 MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
8381 sas_device->is_chassis_slot_valid = 1;
8382 sas_device->chassis_slot =
8383 enclosure_dev->pg0.ChassisSlot;
8387 if (sas_device->handle == le16_to_cpu(
8388 sas_device_pg0->DevHandle))
8389 goto out;
8390 pr_info("\thandle changed from(0x%04x)!!!\n",
8391 sas_device->handle);
8392 sas_device->handle = le16_to_cpu(
8393 sas_device_pg0->DevHandle);
8394 if (sas_target_priv_data)
8395 sas_target_priv_data->handle =
8396 le16_to_cpu(sas_device_pg0->DevHandle);
8397 goto out;
8400 out:
8401 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8405 * _scsih_create_enclosure_list_after_reset - free the existing enclosure list
8406 * and rebuild it by scanning all Enclosure Page 0s
8407 * @ioc: per adapter object
8409 static void
8410 _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
8412 struct _enclosure_node *enclosure_dev;
8413 Mpi2ConfigReply_t mpi_reply;
8414 u16 enclosure_handle;
8415 int rc;
8417 /* Free existing enclosure list */
8418 mpt3sas_free_enclosure_list(ioc);
8420 /* Reconstruct the enclosure list after reset */
8421 enclosure_handle = 0xFFFF;
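/*
 * Walk all Enclosure Page 0s using the GET_NEXT_HANDLE form, starting from
 * 0xFFFF; the loop ends on the first config request that fails or returns
 * a non-success IOCStatus.
 */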
8422 do {
8423 enclosure_dev =
8424 kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
8425 if (!enclosure_dev) {
8426 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8427 __FILE__, __LINE__, __func__);
8428 return;
8430 rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
8431 &enclosure_dev->pg0,
8432 MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
8433 enclosure_handle);
8435 if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
8436 MPI2_IOCSTATUS_MASK)) {
8437 kfree(enclosure_dev);
8438 return;
8440 list_add_tail(&enclosure_dev->list,
8441 &ioc->enclosure_list);
8442 enclosure_handle =
8443 le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
8444 } while (1);
8448 * _scsih_search_responding_sas_devices - search for responding SAS end devices
8449 * @ioc: per adapter object
8451 * After host reset, find out whether devices are still responding.
8452 * If not, remove them.
8454 static void
8455 _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
8457 Mpi2SasDevicePage0_t sas_device_pg0;
8458 Mpi2ConfigReply_t mpi_reply;
8459 u16 ioc_status;
8460 u16 handle;
8461 u32 device_info;
8463 ioc_info(ioc, "search for end-devices: start\n");
8465 if (list_empty(&ioc->sas_device_list))
8466 goto out;
8468 handle = 0xFFFF;
8469 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
8470 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8471 handle))) {
8472 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8473 MPI2_IOCSTATUS_MASK;
8474 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8475 break;
8476 handle = le16_to_cpu(sas_device_pg0.DevHandle);
8477 device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
8478 if (!(_scsih_is_end_device(device_info)))
8479 continue;
8480 _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
8483 out:
8484 ioc_info(ioc, "search for end-devices: complete\n");
8488 * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
8489 * @ioc: per adapter object
8490 * @pcie_device_pg0: PCIe Device page 0
8492 * After host reset, find out whether devices are still responding.
8493 * Used in _scsih_remove_unresponding_devices.
8495 static void
8496 _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
8497 Mpi26PCIeDevicePage0_t *pcie_device_pg0)
8499 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8500 struct scsi_target *starget;
8501 struct _pcie_device *pcie_device;
8502 unsigned long flags;
8504 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8505 list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
8506 if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
8507 && (pcie_device->slot == le16_to_cpu(
8508 pcie_device_pg0->Slot))) {
8509 pcie_device->responding = 1;
8510 starget = pcie_device->starget;
8511 if (starget && starget->hostdata) {
8512 sas_target_priv_data = starget->hostdata;
8513 sas_target_priv_data->tm_busy = 0;
8514 sas_target_priv_data->deleted = 0;
8515 } else
8516 sas_target_priv_data = NULL;
8517 if (starget) {
8518 starget_printk(KERN_INFO, starget,
8519 "handle(0x%04x), wwid(0x%016llx) ",
8520 pcie_device->handle,
8521 (unsigned long long)pcie_device->wwid);
8522 if (pcie_device->enclosure_handle != 0)
8523 starget_printk(KERN_INFO, starget,
8524 "enclosure logical id(0x%016llx), "
8525 "slot(%d)\n",
8526 (unsigned long long)
8527 pcie_device->enclosure_logical_id,
8528 pcie_device->slot);
8531 if (((le32_to_cpu(pcie_device_pg0->Flags)) &
8532 MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
8533 (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
8534 pcie_device->enclosure_level =
8535 pcie_device_pg0->EnclosureLevel;
8536 memcpy(&pcie_device->connector_name[0],
8537 &pcie_device_pg0->ConnectorName[0], 4);
8538 } else {
8539 pcie_device->enclosure_level = 0;
8540 pcie_device->connector_name[0] = '\0';
8543 if (pcie_device->handle == le16_to_cpu(
8544 pcie_device_pg0->DevHandle))
8545 goto out;
8546 pr_info("\thandle changed from(0x%04x)!!!\n",
8547 pcie_device->handle);
8548 pcie_device->handle = le16_to_cpu(
8549 pcie_device_pg0->DevHandle);
8550 if (sas_target_priv_data)
8551 sas_target_priv_data->handle =
8552 le16_to_cpu(pcie_device_pg0->DevHandle);
8553 goto out;
8557 out:
8558 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8562 * _scsih_search_responding_pcie_devices - search for responding PCIe end devices
8563 * @ioc: per adapter object
8565 * After host reset, find out whether devices are still responding.
8566 * If not, remove them.
8568 static void
8569 _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
8571 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8572 Mpi2ConfigReply_t mpi_reply;
8573 u16 ioc_status;
8574 u16 handle;
8575 u32 device_info;
8577 ioc_info(ioc, "search for PCIe end-devices: start\n");
8579 if (list_empty(&ioc->pcie_device_list))
8580 goto out;
8582 handle = 0xFFFF;
8583 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
8584 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
8585 handle))) {
8586 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8587 MPI2_IOCSTATUS_MASK;
8588 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8589 ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
8590 __func__, ioc_status,
8591 le32_to_cpu(mpi_reply.IOCLogInfo));
8592 break;
8594 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
8595 device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
8596 if (!(_scsih_is_nvme_device(device_info)))
8597 continue;
8598 _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
8600 out:
8601 ioc_info(ioc, "search for PCIe end-devices: complete\n");
8605 * _scsih_mark_responding_raid_device - mark a raid_device as responding
8606 * @ioc: per adapter object
8607 * @wwid: world wide identifier for raid volume
8608 * @handle: device handle
8610 * After host reset, find out whether devices are still responding.
8611 * Used in _scsih_remove_unresponding_devices.
8613 static void
8614 _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
8615 u16 handle)
8617 struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
8618 struct scsi_target *starget;
8619 struct _raid_device *raid_device;
8620 unsigned long flags;
8622 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8623 list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
8624 if (raid_device->wwid == wwid && raid_device->starget) {
8625 starget = raid_device->starget;
8626 if (starget && starget->hostdata) {
8627 sas_target_priv_data = starget->hostdata;
8628 sas_target_priv_data->deleted = 0;
8629 } else
8630 sas_target_priv_data = NULL;
8631 raid_device->responding = 1;
8632 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8633 starget_printk(KERN_INFO, raid_device->starget,
8634 "handle(0x%04x), wwid(0x%016llx)\n", handle,
8635 (unsigned long long)raid_device->wwid);
8638 * WARPDRIVE: The handles of the PDs might have changed
8639 * across the host reset so re-initialize the
8640 * required data for Direct IO
8642 mpt3sas_init_warpdrive_properties(ioc, raid_device);
8643 spin_lock_irqsave(&ioc->raid_device_lock, flags);
8644 if (raid_device->handle == handle) {
8645 spin_unlock_irqrestore(&ioc->raid_device_lock,
8646 flags);
8647 return;
8649 pr_info("\thandle changed from(0x%04x)!!!\n",
8650 raid_device->handle);
8651 raid_device->handle = handle;
8652 if (sas_target_priv_data)
8653 sas_target_priv_data->handle = handle;
8654 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8655 return;
8658 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
8662 * _scsih_search_responding_raid_devices - search for responding RAID volumes
8663 * @ioc: per adapter object
8665 * After host reset, find out whether devices are still responding.
8666 * If not, remove them.
8668 static void
8669 _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
8671 Mpi2RaidVolPage1_t volume_pg1;
8672 Mpi2RaidVolPage0_t volume_pg0;
8673 Mpi2RaidPhysDiskPage0_t pd_pg0;
8674 Mpi2ConfigReply_t mpi_reply;
8675 u16 ioc_status;
8676 u16 handle;
8677 u8 phys_disk_num;
8679 if (!ioc->ir_firmware)
8680 return;
8682 ioc_info(ioc, "search for raid volumes: start\n");
8684 if (list_empty(&ioc->raid_device_list))
8685 goto out;
8687 handle = 0xFFFF;
8688 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
8689 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
8690 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8691 MPI2_IOCSTATUS_MASK;
8692 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8693 break;
8694 handle = le16_to_cpu(volume_pg1.DevHandle);
8696 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
8697 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
8698 sizeof(Mpi2RaidVolPage0_t)))
8699 continue;
8701 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
8702 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
8703 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
8704 _scsih_mark_responding_raid_device(ioc,
8705 le64_to_cpu(volume_pg1.WWID), handle);
8708 /* refresh the pd_handles */
8709 if (!ioc->is_warpdrive) {
8710 phys_disk_num = 0xFF;
8711 memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
8712 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
8713 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
8714 phys_disk_num))) {
8715 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8716 MPI2_IOCSTATUS_MASK;
8717 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8718 break;
8719 phys_disk_num = pd_pg0.PhysDiskNum;
8720 handle = le16_to_cpu(pd_pg0.DevHandle);
8721 set_bit(handle, ioc->pd_handles);
8724 out:
8725 ioc_info(ioc, "search for responding raid volumes: complete\n");
8729 * _scsih_mark_responding_expander - mark an expander as responding
8730 * @ioc: per adapter object
8731 * @expander_pg0: SAS Expander Config Page0
8733 * After host reset, find out whether devices are still responding.
8734 * Used in _scsih_remove_unresponding_devices.
8736 static void
8737 _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
8738 Mpi2ExpanderPage0_t *expander_pg0)
8740 struct _sas_node *sas_expander = NULL;
8741 unsigned long flags;
8742 int i;
8743 struct _enclosure_node *enclosure_dev = NULL;
8744 u16 handle = le16_to_cpu(expander_pg0->DevHandle);
8745 u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
8746 u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
8748 if (enclosure_handle)
8749 enclosure_dev =
8750 mpt3sas_scsih_enclosure_find_by_handle(ioc,
8751 enclosure_handle);
8753 spin_lock_irqsave(&ioc->sas_node_lock, flags);
8754 list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
8755 if (sas_expander->sas_address != sas_address)
8756 continue;
8757 sas_expander->responding = 1;
8759 if (enclosure_dev) {
8760 sas_expander->enclosure_logical_id =
8761 le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
8762 sas_expander->enclosure_handle =
8763 le16_to_cpu(expander_pg0->EnclosureHandle);
8766 if (sas_expander->handle == handle)
8767 goto out;
8768 pr_info("\texpander(0x%016llx): handle changed" \
8769 " from(0x%04x) to (0x%04x)!!!\n",
8770 (unsigned long long)sas_expander->sas_address,
8771 sas_expander->handle, handle);
8772 sas_expander->handle = handle;
8773 for (i = 0 ; i < sas_expander->num_phys ; i++)
8774 sas_expander->phy[i].handle = handle;
8775 goto out;
8777 out:
8778 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8782 * _scsih_search_responding_expanders - search for responding expanders
8783 * @ioc: per adapter object
8785 * After host reset, find out whether devices are still responding.
8786 * If not, remove them.
8788 static void
8789 _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
8791 Mpi2ExpanderPage0_t expander_pg0;
8792 Mpi2ConfigReply_t mpi_reply;
8793 u16 ioc_status;
8794 u64 sas_address;
8795 u16 handle;
8797 ioc_info(ioc, "search for expanders: start\n");
8799 if (list_empty(&ioc->sas_expander_list))
8800 goto out;
8802 handle = 0xFFFF;
8803 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8804 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8806 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8807 MPI2_IOCSTATUS_MASK;
8808 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
8809 break;
8811 handle = le16_to_cpu(expander_pg0.DevHandle);
8812 sas_address = le64_to_cpu(expander_pg0.SASAddress);
8813 pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
8814 handle,
8815 (unsigned long long)sas_address);
8816 _scsih_mark_responding_expander(ioc, &expander_pg0);
8819 out:
8820 ioc_info(ioc, "search for expanders: complete\n");
8824 * _scsih_remove_unresponding_devices - removing unresponding devices
8825 * @ioc: per adapter object
8827 static void
8828 _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
8830 struct _sas_device *sas_device, *sas_device_next;
8831 struct _sas_node *sas_expander, *sas_expander_next;
8832 struct _raid_device *raid_device, *raid_device_next;
8833 struct _pcie_device *pcie_device, *pcie_device_next;
8834 struct list_head tmp_list;
8835 unsigned long flags;
8836 LIST_HEAD(head);
8838 ioc_info(ioc, "removing unresponding devices: start\n");
8840 /* removing unresponding end devices */
8841 ioc_info(ioc, "removing unresponding devices: end-devices\n");
8843 * Iterate, pulling off devices marked as non-responding. We become the
8844 * owner for the reference the list had on any object we prune.
8846 spin_lock_irqsave(&ioc->sas_device_lock, flags);
8847 list_for_each_entry_safe(sas_device, sas_device_next,
8848 &ioc->sas_device_list, list) {
8849 if (!sas_device->responding)
8850 list_move_tail(&sas_device->list, &head);
8851 else
8852 sas_device->responding = 0;
8854 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
8857 * Now, uninitialize and remove the unresponding devices we pruned.
8859 list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
8860 _scsih_remove_device(ioc, sas_device);
8861 list_del_init(&sas_device->list);
8862 sas_device_put(sas_device);
8865 ioc_info(ioc, "removing unresponding devices: pcie end-devices\n");
8866 INIT_LIST_HEAD(&head);
8867 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
8868 list_for_each_entry_safe(pcie_device, pcie_device_next,
8869 &ioc->pcie_device_list, list) {
8870 if (!pcie_device->responding)
8871 list_move_tail(&pcie_device->list, &head);
8872 else
8873 pcie_device->responding = 0;
8875 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
8877 list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
8878 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
8879 list_del_init(&pcie_device->list);
8880 pcie_device_put(pcie_device);
8883 /* removing unresponding volumes */
8884 if (ioc->ir_firmware) {
8885 ioc_info(ioc, "removing unresponding devices: volumes\n");
8886 list_for_each_entry_safe(raid_device, raid_device_next,
8887 &ioc->raid_device_list, list) {
8888 if (!raid_device->responding)
8889 _scsih_sas_volume_delete(ioc,
8890 raid_device->handle);
8891 else
8892 raid_device->responding = 0;
8896 /* removing unresponding expanders */
8897 ioc_info(ioc, "removing unresponding devices: expanders\n");
8898 spin_lock_irqsave(&ioc->sas_node_lock, flags);
8899 INIT_LIST_HEAD(&tmp_list);
8900 list_for_each_entry_safe(sas_expander, sas_expander_next,
8901 &ioc->sas_expander_list, list) {
8902 if (!sas_expander->responding)
8903 list_move_tail(&sas_expander->list, &tmp_list);
8904 else
8905 sas_expander->responding = 0;
8907 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8908 list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
8909 list) {
8910 _scsih_expander_node_remove(ioc, sas_expander);
8913 ioc_info(ioc, "removing unresponding devices: complete\n");
8915 /* unblock devices */
8916 _scsih_ublock_io_all_device(ioc);
8919 static void
8920 _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
8921 struct _sas_node *sas_expander, u16 handle)
8923 Mpi2ExpanderPage1_t expander_pg1;
8924 Mpi2ConfigReply_t mpi_reply;
8925 int i;
8927 for (i = 0 ; i < sas_expander->num_phys ; i++) {
8928 if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
8929 &expander_pg1, i, handle))) {
8930 ioc_err(ioc, "failure at %s:%d/%s()!\n",
8931 __FILE__, __LINE__, __func__);
8932 return;
8935 mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
8936 le16_to_cpu(expander_pg1.AttachedDevHandle), i,
8937 expander_pg1.NegotiatedLinkRate >> 4);
8942 * _scsih_scan_for_devices_after_reset - scan for devices after host reset
8943 * @ioc: per adapter object
8945 static void
8946 _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
8948 Mpi2ExpanderPage0_t expander_pg0;
8949 Mpi2SasDevicePage0_t sas_device_pg0;
8950 Mpi26PCIeDevicePage0_t pcie_device_pg0;
8951 Mpi2RaidVolPage1_t volume_pg1;
8952 Mpi2RaidVolPage0_t volume_pg0;
8953 Mpi2RaidPhysDiskPage0_t pd_pg0;
8954 Mpi2EventIrConfigElement_t element;
8955 Mpi2ConfigReply_t mpi_reply;
8956 u8 phys_disk_num;
8957 u16 ioc_status;
8958 u16 handle, parent_handle;
8959 u64 sas_address;
8960 struct _sas_device *sas_device;
8961 struct _pcie_device *pcie_device;
8962 struct _sas_node *expander_device;
8963 static struct _raid_device *raid_device;
8964 u8 retry_count;
8965 unsigned long flags;
8967 ioc_info(ioc, "scan devices: start\n");
8969 _scsih_sas_host_refresh(ioc);
8971 ioc_info(ioc, "\tscan devices: expanders start\n");
8973 /* expanders */
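/*
 * Each scan below follows the same pattern: seed the handle with 0xFFFF,
 * request the next config page after that handle, and stop as soon as a
 * request fails or IOCStatus is not SUCCESS.
 */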
8974 handle = 0xFFFF;
8975 while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
8976 MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
8977 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
8978 MPI2_IOCSTATUS_MASK;
8979 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
8980 ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
8981 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
8982 break;
8984 handle = le16_to_cpu(expander_pg0.DevHandle);
8985 spin_lock_irqsave(&ioc->sas_node_lock, flags);
8986 expander_device = mpt3sas_scsih_expander_find_by_sas_address(
8987 ioc, le64_to_cpu(expander_pg0.SASAddress));
8988 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
8989 if (expander_device)
8990 _scsih_refresh_expander_links(ioc, expander_device,
8991 handle);
8992 else {
8993 ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
8994 handle,
8995 (u64)le64_to_cpu(expander_pg0.SASAddress));
8996 _scsih_expander_add(ioc, handle);
8997 ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
8998 handle,
8999 (u64)le64_to_cpu(expander_pg0.SASAddress));
9003 ioc_info(ioc, "\tscan devices: expanders complete\n");
9005 if (!ioc->ir_firmware)
9006 goto skip_to_sas;
9008 ioc_info(ioc, "\tscan devices: phys disk start\n");
9010 /* phys disk */
9011 phys_disk_num = 0xFF;
9012 while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
9013 &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
9014 phys_disk_num))) {
9015 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9016 MPI2_IOCSTATUS_MASK;
9017 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9018 ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9019 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9020 break;
9022 phys_disk_num = pd_pg0.PhysDiskNum;
9023 handle = le16_to_cpu(pd_pg0.DevHandle);
9024 sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
9025 if (sas_device) {
9026 sas_device_put(sas_device);
9027 continue;
9029 if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9030 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
9031 handle) != 0)
9032 continue;
9033 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9034 MPI2_IOCSTATUS_MASK;
9035 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9036 ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
9037 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9038 break;
9040 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9041 if (!_scsih_get_sas_address(ioc, parent_handle,
9042 &sas_address)) {
9043 ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9044 handle,
9045 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9046 mpt3sas_transport_update_links(ioc, sas_address,
9047 handle, sas_device_pg0.PhyNum,
9048 MPI2_SAS_NEG_LINK_RATE_1_5);
9049 set_bit(handle, ioc->pd_handles);
9050 retry_count = 0;
9051 /* This will retry adding the end device.
9052 * _scsih_add_device() will decide on retries and
9053 * return "1" when it should be retried
9055 while (_scsih_add_device(ioc, handle, retry_count++,
9056 1)) {
9057 ssleep(1);
9059 ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
9060 handle,
9061 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9065 ioc_info(ioc, "\tscan devices: phys disk complete\n");
9067 ioc_info(ioc, "\tscan devices: volumes start\n");
9069 /* volumes */
9070 handle = 0xFFFF;
9071 while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
9072 &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
9073 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9074 MPI2_IOCSTATUS_MASK;
9075 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9076 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9077 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9078 break;
9080 handle = le16_to_cpu(volume_pg1.DevHandle);
9081 spin_lock_irqsave(&ioc->raid_device_lock, flags);
9082 raid_device = _scsih_raid_device_find_by_wwid(ioc,
9083 le64_to_cpu(volume_pg1.WWID));
9084 spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
9085 if (raid_device)
9086 continue;
9087 if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
9088 &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
9089 sizeof(Mpi2RaidVolPage0_t)))
9090 continue;
9091 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9092 MPI2_IOCSTATUS_MASK;
9093 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9094 ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9095 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9096 break;
9098 if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
9099 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
9100 volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
9101 memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
9102 element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
9103 element.VolDevHandle = volume_pg1.DevHandle;
9104 ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
9105 volume_pg1.DevHandle);
9106 _scsih_sas_volume_add(ioc, &element);
9107 ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
9108 volume_pg1.DevHandle);
9112 ioc_info(ioc, "\tscan devices: volumes complete\n");
9114 skip_to_sas:
9116 ioc_info(ioc, "\tscan devices: end devices start\n");
9118 /* sas devices */
9119 handle = 0xFFFF;
9120 while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
9121 &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9122 handle))) {
9123 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
9124 MPI2_IOCSTATUS_MASK;
9125 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9126 ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9127 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9128 break;
9130 handle = le16_to_cpu(sas_device_pg0.DevHandle);
9131 if (!(_scsih_is_end_device(
9132 le32_to_cpu(sas_device_pg0.DeviceInfo))))
9133 continue;
9134 sas_device = mpt3sas_get_sdev_by_addr(ioc,
9135 le64_to_cpu(sas_device_pg0.SASAddress));
9136 if (sas_device) {
9137 sas_device_put(sas_device);
9138 continue;
9140 parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
9141 if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
9142 ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9143 handle,
9144 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9145 mpt3sas_transport_update_links(ioc, sas_address, handle,
9146 sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
9147 retry_count = 0;
9148 /* This will retry adding the end device.
9149 * _scsih_add_device() will decide on retries and
9150 * return "1" when it should be retried
9152 while (_scsih_add_device(ioc, handle, retry_count++,
9153 0)) {
9154 ssleep(1);
9156 ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
9157 handle,
9158 (u64)le64_to_cpu(sas_device_pg0.SASAddress));
9161 ioc_info(ioc, "\tscan devices: end devices complete\n");
9162 ioc_info(ioc, "\tscan devices: pcie end devices start\n");
9164 /* pcie devices */
9165 handle = 0xFFFF;
9166 while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
9167 &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
9168 handle))) {
9169 ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
9170 & MPI2_IOCSTATUS_MASK;
9171 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
9172 ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
9173 ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
9174 break;
9176 handle = le16_to_cpu(pcie_device_pg0.DevHandle);
9177 if (!(_scsih_is_nvme_device(
9178 le32_to_cpu(pcie_device_pg0.DeviceInfo))))
9179 continue;
9180 pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
9181 le64_to_cpu(pcie_device_pg0.WWID));
9182 if (pcie_device) {
9183 pcie_device_put(pcie_device);
9184 continue;
9186 retry_count = 0;
9187 parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
9188 _scsih_pcie_add_device(ioc, handle);
9190 ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
9191 handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
9193 ioc_info(ioc, "\tscan devices: pcie end devices complete\n");
9194 ioc_info(ioc, "scan devices: complete\n");
9198 * mpt3sas_scsih_pre_reset_handler - reset callback handler (for scsih)
9199 * @ioc: per adapter object
9201 * The handler for doing any required cleanup or initialization.
9203 void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9205 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
9209 * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
9210 * @ioc: per adapter object
9212 * The handler for doing any required cleanup or initialization.
9214 void
9215 mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
9217 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
9218 if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
9219 ioc->scsih_cmds.status |= MPT3_CMD_RESET;
9220 mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
9221 complete(&ioc->scsih_cmds.done);
9223 if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
9224 ioc->tm_cmds.status |= MPT3_CMD_RESET;
9225 mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
9226 complete(&ioc->tm_cmds.done);
9229 memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
9230 memset(ioc->device_remove_in_progress, 0,
9231 ioc->device_remove_in_progress_sz);
9232 _scsih_fw_event_cleanup_queue(ioc);
9233 _scsih_flush_running_cmds(ioc);
9237 * mpt3sas_scsih_reset_done_handler - reset callback handler (for scsih)
9238 * @ioc: per adapter object
9240 * The handler for doing any required cleanup or initialization.
9242 void
9243 mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
9245 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
9246 if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
9247 !ioc->sas_hba.num_phys)) {
9248 _scsih_prep_device_scan(ioc);
9249 _scsih_create_enclosure_list_after_reset(ioc);
9250 _scsih_search_responding_sas_devices(ioc);
9251 _scsih_search_responding_pcie_devices(ioc);
9252 _scsih_search_responding_raid_devices(ioc);
9253 _scsih_search_responding_expanders(ioc);
9254 _scsih_error_recovery_delete_devices(ioc);
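/*
 * The three reset callbacks above are invoked by the base driver at
 * distinct points of a host reset: pre-reset (quiesce), after-reset
 * (fail back any internal scsih/tm commands still pending with
 * MPT3_CMD_RESET and clear the pending-device bitmaps), and
 * done-reset (re-qualify the previously known SAS, PCIe, RAID and
 * expander objects).  Devices that did not come back are removed
 * later by the MPT3SAS_REMOVE_UNRESPONDING_DEVICES work item handled
 * in _mpt3sas_fw_work() below, which is what
 * _scsih_error_recovery_delete_devices() schedules.
 */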
9259 * _mpt3sas_fw_work - delayed task for processing firmware events
9260 * @ioc: per adapter object
9261 * @fw_event: The fw_event_work object
9262 * Context: user.
9264 static void
9265 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
9267 _scsih_fw_event_del_from_list(ioc, fw_event);
9269 /* the queue is being flushed so ignore this event */
9270 if (ioc->remove_host || ioc->pci_error_recovery) {
9271 fw_event_work_put(fw_event);
9272 return;
9275 switch (fw_event->event) {
9276 case MPT3SAS_PROCESS_TRIGGER_DIAG:
9277 mpt3sas_process_trigger_data(ioc,
9278 (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
9279 fw_event->event_data);
9280 break;
9281 case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
9282 while (scsi_host_in_recovery(ioc->shost) ||
9283 ioc->shost_recovery) {
9284 /*
9285 * If we're unloading, bail. Otherwise, this can become
9286 * an infinite loop.
9287 */
9288 if (ioc->remove_host)
9289 goto out;
9290 ssleep(1);
9292 _scsih_remove_unresponding_devices(ioc);
9293 _scsih_scan_for_devices_after_reset(ioc);
9294 break;
9295 case MPT3SAS_PORT_ENABLE_COMPLETE:
9296 ioc->start_scan = 0;
9297 if (missing_delay[0] != -1 && missing_delay[1] != -1)
9298 mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
9299 missing_delay[1]);
9300 dewtprintk(ioc,
9301 ioc_info(ioc, "port enable: complete from worker thread\n"));
9302 break;
9303 case MPT3SAS_TURN_ON_PFA_LED:
9304 _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
9305 break;
9306 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9307 _scsih_sas_topology_change_event(ioc, fw_event);
9308 break;
9309 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9310 _scsih_sas_device_status_change_event(ioc, fw_event);
9311 break;
9312 case MPI2_EVENT_SAS_DISCOVERY:
9313 _scsih_sas_discovery_event(ioc, fw_event);
9314 break;
9315 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9316 _scsih_sas_device_discovery_error_event(ioc, fw_event);
9317 break;
9318 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9319 _scsih_sas_broadcast_primitive_event(ioc, fw_event);
9320 break;
9321 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9322 _scsih_sas_enclosure_dev_status_change_event(ioc,
9323 fw_event);
9324 break;
9325 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9326 _scsih_sas_ir_config_change_event(ioc, fw_event);
9327 break;
9328 case MPI2_EVENT_IR_VOLUME:
9329 _scsih_sas_ir_volume_event(ioc, fw_event);
9330 break;
9331 case MPI2_EVENT_IR_PHYSICAL_DISK:
9332 _scsih_sas_ir_physical_disk_event(ioc, fw_event);
9333 break;
9334 case MPI2_EVENT_IR_OPERATION_STATUS:
9335 _scsih_sas_ir_operation_status_event(ioc, fw_event);
9336 break;
9337 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9338 _scsih_pcie_device_status_change_event(ioc, fw_event);
9339 break;
9340 case MPI2_EVENT_PCIE_ENUMERATION:
9341 _scsih_pcie_enumeration_event(ioc, fw_event);
9342 break;
9343 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9344 _scsih_pcie_topology_change_event(ioc, fw_event);
9345 return;
9346 break;
9348 out:
9349 fw_event_work_put(fw_event);
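/*
 * fw_event_work objects are reference counted: the worker above first
 * unlinks the event from the internal list (which also drops the
 * reference held for list membership) and then drops its own reference
 * with fw_event_work_put() once the event has been handled, or
 * immediately when the host is being removed or is in PCI error
 * recovery.  The object is freed when the last reference goes away.
 */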
9353 * _firmware_event_work - worker routine that processes firmware events
9354 * @work: The fw_event_work object
9355 * Context: user.
9357 * Wrapper for the work thread handling firmware events.
9360 static void
9361 _firmware_event_work(struct work_struct *work)
9363 struct fw_event_work *fw_event = container_of(work,
9364 struct fw_event_work, work);
9366 _mpt3sas_fw_work(fw_event->ioc, fw_event);
9370 * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
9371 * @ioc: per adapter object
9372 * @msix_index: MSIX table index supplied by the OS
9373 * @reply: reply message frame (lower 32-bit addr)
9374 * Context: interrupt.
9376 * This function merely adds a new work task into ioc->firmware_event_thread.
9377 * The tasks are processed by _firmware_event_work() in user context.
9379 * Return: 1 meaning mf should be freed from _base_interrupt
9380 * 0 means the mf is freed from this function.
9383 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
9384 u32 reply)
9386 struct fw_event_work *fw_event;
9387 Mpi2EventNotificationReply_t *mpi_reply;
9388 u16 event;
9389 u16 sz;
9390 Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
9392 /* events turned off due to host reset */
9393 if (ioc->pci_error_recovery)
9394 return 1;
9396 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
9398 if (unlikely(!mpi_reply)) {
9399 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
9400 __FILE__, __LINE__, __func__);
9401 return 1;
9404 event = le16_to_cpu(mpi_reply->Event);
9406 if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
9407 mpt3sas_trigger_event(ioc, event, 0);
9409 switch (event) {
9410 /* handle these */
9411 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
9413 Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
9414 (Mpi2EventDataSasBroadcastPrimitive_t *)
9415 mpi_reply->EventData;
9417 if (baen_data->Primitive !=
9418 MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
9419 return 1;
9421 if (ioc->broadcast_aen_busy) {
9422 ioc->broadcast_aen_pending++;
9423 return 1;
9424 } else
9425 ioc->broadcast_aen_busy = 1;
9426 break;
9429 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
9430 _scsih_check_topo_delete_events(ioc,
9431 (Mpi2EventDataSasTopologyChangeList_t *)
9432 mpi_reply->EventData);
9433 break;
9434 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
9435 _scsih_check_pcie_topo_remove_events(ioc,
9436 (Mpi26EventDataPCIeTopologyChangeList_t *)
9437 mpi_reply->EventData);
9438 break;
9439 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
9440 _scsih_check_ir_config_unhide_events(ioc,
9441 (Mpi2EventDataIrConfigChangeList_t *)
9442 mpi_reply->EventData);
9443 break;
9444 case MPI2_EVENT_IR_VOLUME:
9445 _scsih_check_volume_delete_events(ioc,
9446 (Mpi2EventDataIrVolume_t *)
9447 mpi_reply->EventData);
9448 break;
9449 case MPI2_EVENT_LOG_ENTRY_ADDED:
9451 Mpi2EventDataLogEntryAdded_t *log_entry;
9452 u32 *log_code;
9454 if (!ioc->is_warpdrive)
9455 break;
9457 log_entry = (Mpi2EventDataLogEntryAdded_t *)
9458 mpi_reply->EventData;
9459 log_code = (u32 *)log_entry->LogData;
9461 if (le16_to_cpu(log_entry->LogEntryQualifier)
9462 != MPT2_WARPDRIVE_LOGENTRY)
9463 break;
9465 switch (le32_to_cpu(*log_code)) {
9466 case MPT2_WARPDRIVE_LC_SSDT:
9467 ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9468 break;
9469 case MPT2_WARPDRIVE_LC_SSDLW:
9470 ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
9471 break;
9472 case MPT2_WARPDRIVE_LC_SSDLF:
9473 ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
9474 break;
9475 case MPT2_WARPDRIVE_LC_BRMF:
9476 ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
9477 break;
9480 break;
9482 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
9483 case MPI2_EVENT_IR_OPERATION_STATUS:
9484 case MPI2_EVENT_SAS_DISCOVERY:
9485 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
9486 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
9487 case MPI2_EVENT_IR_PHYSICAL_DISK:
9488 case MPI2_EVENT_PCIE_ENUMERATION:
9489 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
9490 break;
9492 case MPI2_EVENT_TEMP_THRESHOLD:
9493 _scsih_temp_threshold_events(ioc,
9494 (Mpi2EventDataTemperature_t *)
9495 mpi_reply->EventData);
9496 break;
9497 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
9498 ActiveCableEventData =
9499 (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
9500 switch (ActiveCableEventData->ReasonCode) {
9501 case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
9502 ioc_notice(ioc,
9503 "Currently an active cable with ReceptacleID %d cannot be powered and devices connected to this active cable will not be seen; this active cable requires %d mW of power\n",
9504 ActiveCableEventData->ReceptacleID,
9505 ActiveCableEventData->ActiveCablePowerRequirement);
9508 break;
9510 case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
9511 ioc_notice(ioc,
9512 "Currently a cable with ReceptacleID %d is not running at optimal speed (12 Gb/s rate)\n",
9513 ActiveCableEventData->ReceptacleID);
9515 break;
9518 break;
9520 default: /* ignore the rest */
9521 return 1;
9524 sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
9525 fw_event = alloc_fw_event_work(sz);
9526 if (!fw_event) {
9527 ioc_err(ioc, "failure at %s:%d/%s()!\n",
9528 __FILE__, __LINE__, __func__);
9529 return 1;
9532 memcpy(fw_event->event_data, mpi_reply->EventData, sz);
9533 fw_event->ioc = ioc;
9534 fw_event->VF_ID = mpi_reply->VF_ID;
9535 fw_event->VP_ID = mpi_reply->VP_ID;
9536 fw_event->event = event;
9537 _scsih_fw_event_add(ioc, fw_event);
9538 fw_event_work_put(fw_event);
9539 return 1;
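/*
 * Event handling is split in two stages: this callback runs in
 * interrupt context, so it only screens the event, copies the event
 * data out of the reply frame and queues a work item; the real work
 * happens later in _mpt3sas_fw_work() in user context.  A condensed
 * sketch of the hand-off performed above (error handling omitted):
 *
 *	sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
 *	fw_event = alloc_fw_event_work(sz);
 *	memcpy(fw_event->event_data, mpi_reply->EventData, sz);
 *	fw_event->event = le16_to_cpu(mpi_reply->Event);
 *	_scsih_fw_event_add(ioc, fw_event);	// queue to ordered workqueue
 *	fw_event_work_put(fw_event);		// drop the local reference
 *
 * Returning 1 tells _base_interrupt() to free the message frame.
 */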
9543 * _scsih_expander_node_remove - removing expander device from list.
9544 * @ioc: per adapter object
9545 * @sas_expander: the sas expander (_sas_node) object
9547 * Removing object and freeing associated memory from the
9548 * ioc->sas_expander_list.
9550 static void
9551 _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
9552 struct _sas_node *sas_expander)
9554 struct _sas_port *mpt3sas_port, *next;
9555 unsigned long flags;
9557 /* remove sibling ports attached to this expander */
9558 list_for_each_entry_safe(mpt3sas_port, next,
9559 &sas_expander->sas_port_list, port_list) {
9560 if (ioc->shost_recovery)
9561 return;
9562 if (mpt3sas_port->remote_identify.device_type ==
9563 SAS_END_DEVICE)
9564 mpt3sas_device_remove_by_sas_address(ioc,
9565 mpt3sas_port->remote_identify.sas_address);
9566 else if (mpt3sas_port->remote_identify.device_type ==
9567 SAS_EDGE_EXPANDER_DEVICE ||
9568 mpt3sas_port->remote_identify.device_type ==
9569 SAS_FANOUT_EXPANDER_DEVICE)
9570 mpt3sas_expander_remove(ioc,
9571 mpt3sas_port->remote_identify.sas_address);
9574 mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
9575 sas_expander->sas_address_parent);
9577 ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
9578 sas_expander->handle, (unsigned long long)
9579 sas_expander->sas_address);
9581 spin_lock_irqsave(&ioc->sas_node_lock, flags);
9582 list_del(&sas_expander->list);
9583 spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
9585 kfree(sas_expander->phy);
9586 kfree(sas_expander);
9590 * _scsih_ir_shutdown - IR shutdown notification
9591 * @ioc: per adapter object
9593 * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
9594 * the host system is shutting down.
9596 static void
9597 _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
9599 Mpi2RaidActionRequest_t *mpi_request;
9600 Mpi2RaidActionReply_t *mpi_reply;
9601 u16 smid;
9603 /* is IR firmware build loaded ? */
9604 if (!ioc->ir_firmware)
9605 return;
9607 /* are there any volumes ? */
9608 if (list_empty(&ioc->raid_device_list))
9609 return;
9611 mutex_lock(&ioc->scsih_cmds.mutex);
9613 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
9614 ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
9615 goto out;
9617 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
9619 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
9620 if (!smid) {
9621 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
9622 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9623 goto out;
9626 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
9627 ioc->scsih_cmds.smid = smid;
9628 memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
9630 mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
9631 mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
9633 if (!ioc->hide_ir_msg)
9634 ioc_info(ioc, "IR shutdown (sending)\n");
9635 init_completion(&ioc->scsih_cmds.done);
9636 mpt3sas_base_put_smid_default(ioc, smid);
9637 wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
9639 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
9640 ioc_err(ioc, "%s: timeout\n", __func__);
9641 goto out;
9644 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
9645 mpi_reply = ioc->scsih_cmds.reply;
9646 if (!ioc->hide_ir_msg)
9647 ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
9648 le16_to_cpu(mpi_reply->IOCStatus),
9649 le32_to_cpu(mpi_reply->IOCLogInfo));
9652 out:
9653 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
9654 mutex_unlock(&ioc->scsih_cmds.mutex);
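/*
 * The RAID action above follows the driver's standard pattern for
 * internal commands: take scsih_cmds.mutex, mark the command PENDING,
 * obtain a smid, build the request in the message frame, post it with
 * mpt3sas_base_put_smid_default() and wait on the completion with a
 * 10 second timeout.  Roughly:
 *
 *	ioc->scsih_cmds.status = MPT3_CMD_PENDING;
 *	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
 *	... build the Mpi2RaidActionRequest_t in the frame ...
 *	init_completion(&ioc->scsih_cmds.done);
 *	mpt3sas_base_put_smid_default(ioc, smid);
 *	wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
 *
 * If a host reset interrupts the command, the after-reset handler
 * above completes it with MPT3_CMD_RESET set.
 */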
9658 * scsih_remove - detach and remove the scsi host
9659 * @pdev: PCI device struct
9661 * Routine called when unloading the driver.
9663 static void scsih_remove(struct pci_dev *pdev)
9665 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9666 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9667 struct _sas_port *mpt3sas_port, *next_port;
9668 struct _raid_device *raid_device, *next;
9669 struct MPT3SAS_TARGET *sas_target_priv_data;
9670 struct _pcie_device *pcie_device, *pcienext;
9671 struct workqueue_struct *wq;
9672 unsigned long flags;
9674 ioc->remove_host = 1;
9676 mpt3sas_wait_for_commands_to_complete(ioc);
9677 _scsih_flush_running_cmds(ioc);
9679 _scsih_fw_event_cleanup_queue(ioc);
9681 spin_lock_irqsave(&ioc->fw_event_lock, flags);
9682 wq = ioc->firmware_event_thread;
9683 ioc->firmware_event_thread = NULL;
9684 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9685 if (wq)
9686 destroy_workqueue(wq);
9688 /* release all the volumes */
9689 _scsih_ir_shutdown(ioc);
9690 sas_remove_host(shost);
9691 list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
9692 list) {
9693 if (raid_device->starget) {
9694 sas_target_priv_data =
9695 raid_device->starget->hostdata;
9696 sas_target_priv_data->deleted = 1;
9697 scsi_remove_target(&raid_device->starget->dev);
9699 ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
9700 raid_device->handle, (u64)raid_device->wwid);
9701 _scsih_raid_device_remove(ioc, raid_device);
9703 list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
9704 list) {
9705 _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
9706 list_del_init(&pcie_device->list);
9707 pcie_device_put(pcie_device);
9710 /* free ports attached to the sas_host */
9711 list_for_each_entry_safe(mpt3sas_port, next_port,
9712 &ioc->sas_hba.sas_port_list, port_list) {
9713 if (mpt3sas_port->remote_identify.device_type ==
9714 SAS_END_DEVICE)
9715 mpt3sas_device_remove_by_sas_address(ioc,
9716 mpt3sas_port->remote_identify.sas_address);
9717 else if (mpt3sas_port->remote_identify.device_type ==
9718 SAS_EDGE_EXPANDER_DEVICE ||
9719 mpt3sas_port->remote_identify.device_type ==
9720 SAS_FANOUT_EXPANDER_DEVICE)
9721 mpt3sas_expander_remove(ioc,
9722 mpt3sas_port->remote_identify.sas_address);
9725 /* free phys attached to the sas_host */
9726 if (ioc->sas_hba.num_phys) {
9727 kfree(ioc->sas_hba.phy);
9728 ioc->sas_hba.phy = NULL;
9729 ioc->sas_hba.num_phys = 0;
9732 mpt3sas_base_detach(ioc);
9733 spin_lock(&gioc_lock);
9734 list_del(&ioc->list);
9735 spin_unlock(&gioc_lock);
9736 scsi_host_put(shost);
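/*
 * Teardown order matters in scsih_remove(): the firmware-event
 * workqueue is drained and destroyed before any objects are freed so
 * no worker can touch a device being removed, the IR subsystem is
 * notified via _scsih_ir_shutdown() before sas_remove_host(), and
 * mpt3sas_base_detach() runs last, after every RAID volume, PCIe
 * device, SAS port and HBA phy has been released.
 */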
9740 * scsih_shutdown - routine called during system shutdown
9741 * @pdev: PCI device struct
9743 static void
9744 scsih_shutdown(struct pci_dev *pdev)
9746 struct Scsi_Host *shost = pci_get_drvdata(pdev);
9747 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
9748 struct workqueue_struct *wq;
9749 unsigned long flags;
9751 ioc->remove_host = 1;
9753 mpt3sas_wait_for_commands_to_complete(ioc);
9754 _scsih_flush_running_cmds(ioc);
9756 _scsih_fw_event_cleanup_queue(ioc);
9758 spin_lock_irqsave(&ioc->fw_event_lock, flags);
9759 wq = ioc->firmware_event_thread;
9760 ioc->firmware_event_thread = NULL;
9761 spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
9762 if (wq)
9763 destroy_workqueue(wq);
9765 _scsih_ir_shutdown(ioc);
9766 mpt3sas_base_detach(ioc);
9771 * _scsih_probe_boot_devices - report the first (boot) device
9772 * @ioc: per adapter object
9774 * If specified in bios page 2, this routine reports the first
9775 * device to scsi-ml or the sas transport for persistent boot device
9776 * purposes. Please refer to _scsih_determine_boot_device().
9778 static void
9779 _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
9781 u32 channel;
9782 void *device;
9783 struct _sas_device *sas_device;
9784 struct _raid_device *raid_device;
9785 struct _pcie_device *pcie_device;
9786 u16 handle;
9787 u64 sas_address_parent;
9788 u64 sas_address;
9789 unsigned long flags;
9790 int rc;
9791 int tid;
9793 /* no Bios, return immediately */
9794 if (!ioc->bios_pg3.BiosVersion)
9795 return;
9797 device = NULL;
9798 if (ioc->req_boot_device.device) {
9799 device = ioc->req_boot_device.device;
9800 channel = ioc->req_boot_device.channel;
9801 } else if (ioc->req_alt_boot_device.device) {
9802 device = ioc->req_alt_boot_device.device;
9803 channel = ioc->req_alt_boot_device.channel;
9804 } else if (ioc->current_boot_device.device) {
9805 device = ioc->current_boot_device.device;
9806 channel = ioc->current_boot_device.channel;
9809 if (!device)
9810 return;
9812 if (channel == RAID_CHANNEL) {
9813 raid_device = device;
9814 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9815 raid_device->id, 0);
9816 if (rc)
9817 _scsih_raid_device_remove(ioc, raid_device);
9818 } else if (channel == PCIE_CHANNEL) {
9819 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9820 pcie_device = device;
9821 tid = pcie_device->id;
9822 list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
9823 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9824 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
9825 if (rc)
9826 _scsih_pcie_device_remove(ioc, pcie_device);
9827 } else {
9828 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9829 sas_device = device;
9830 handle = sas_device->handle;
9831 sas_address_parent = sas_device->sas_address_parent;
9832 sas_address = sas_device->sas_address;
9833 list_move_tail(&sas_device->list, &ioc->sas_device_list);
9834 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9836 if (ioc->hide_drives)
9837 return;
9838 if (!mpt3sas_transport_port_add(ioc, handle,
9839 sas_address_parent)) {
9840 _scsih_sas_device_remove(ioc, sas_device);
9841 } else if (!sas_device->starget) {
9842 if (!ioc->is_driver_loading) {
9843 mpt3sas_transport_port_remove(ioc,
9844 sas_address,
9845 sas_address_parent);
9846 _scsih_sas_device_remove(ioc, sas_device);
9853 * _scsih_probe_raid - reporting raid volumes to scsi-ml
9854 * @ioc: per adapter object
9856 * Called during initial loading of the driver.
9858 static void
9859 _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
9861 struct _raid_device *raid_device, *raid_next;
9862 int rc;
9864 list_for_each_entry_safe(raid_device, raid_next,
9865 &ioc->raid_device_list, list) {
9866 if (raid_device->starget)
9867 continue;
9868 rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
9869 raid_device->id, 0);
9870 if (rc)
9871 _scsih_raid_device_remove(ioc, raid_device);
9875 static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
9877 struct _sas_device *sas_device = NULL;
9878 unsigned long flags;
9880 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9881 if (!list_empty(&ioc->sas_device_init_list)) {
9882 sas_device = list_first_entry(&ioc->sas_device_init_list,
9883 struct _sas_device, list);
9884 sas_device_get(sas_device);
9886 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
9888 return sas_device;
9891 static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
9892 struct _sas_device *sas_device)
9894 unsigned long flags;
9896 spin_lock_irqsave(&ioc->sas_device_lock, flags);
9898 /*
9899 * Since we dropped the lock during the call to port_add(), we need to
9900 * be careful here that somebody else didn't move or delete this item
9901 * while we were busy with other things.
9902 *
9903 * If it was on the list, we need a put() for the reference the list
9904 * had. Either way, we need a get() for the destination list.
9905 */
9906 if (!list_empty(&sas_device->list)) {
9907 list_del_init(&sas_device->list);
9908 sas_device_put(sas_device);
9911 sas_device_get(sas_device);
9912 list_add_tail(&sas_device->list, &ioc->sas_device_list);
9914 spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
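/*
 * Reference counting convention used by these helpers: each list a
 * _sas_device sits on owns one reference, and each local pointer
 * obtained from a lookup owns another.  Moving a device from the init
 * list to the active list therefore pairs a put (for the list it
 * leaves) with a get (for the list it joins), while the caller keeps
 * its own reference until it is finished:
 *
 *	sas_device = get_next_sas_device(ioc);	 // +1 for the caller
 *	... report it to the sas transport layer ...
 *	sas_device_make_active(ioc, sas_device); // move the list ref
 *	sas_device_put(sas_device);		 // drop the caller ref
 */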
9918 * _scsih_probe_sas - reporting sas devices to sas transport
9919 * @ioc: per adapter object
9921 * Called during initial loading of the driver.
9923 static void
9924 _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
9926 struct _sas_device *sas_device;
9928 if (ioc->hide_drives)
9929 return;
9931 while ((sas_device = get_next_sas_device(ioc))) {
9932 if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
9933 sas_device->sas_address_parent)) {
9934 _scsih_sas_device_remove(ioc, sas_device);
9935 sas_device_put(sas_device);
9936 continue;
9937 } else if (!sas_device->starget) {
9938 /*
9939 * When async scanning is enabled, it's not possible to
9940 * remove devices while scanning is turned on due to an
9941 * oops in scsi_sysfs_add_sdev()->add_device()->
9942 * sysfs_addrm_start()
9943 */
9944 if (!ioc->is_driver_loading) {
9945 mpt3sas_transport_port_remove(ioc,
9946 sas_device->sas_address,
9947 sas_device->sas_address_parent);
9948 _scsih_sas_device_remove(ioc, sas_device);
9949 sas_device_put(sas_device);
9950 continue;
9953 sas_device_make_active(ioc, sas_device);
9954 sas_device_put(sas_device);
9959 * get_next_pcie_device - Get the next pcie device
9960 * @ioc: per adapter object
9962 * Get the next pcie device from the pcie_device_init_list.
9964 * Return: pcie device structure if pcie_device_init_list list is not empty
9965 * otherwise returns NULL
9967 static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
9969 struct _pcie_device *pcie_device = NULL;
9970 unsigned long flags;
9972 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9973 if (!list_empty(&ioc->pcie_device_init_list)) {
9974 pcie_device = list_first_entry(&ioc->pcie_device_init_list,
9975 struct _pcie_device, list);
9976 pcie_device_get(pcie_device);
9978 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
9980 return pcie_device;
9984 * pcie_device_make_active - Add pcie device to the pcie_device_list
9985 * @ioc: per adapter object
9986 * @pcie_device: pcie device object
9988 * Add the pcie device, which has been registered with the SCSI Transport
9989 * Layer, to the pcie_device_list.
9991 static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
9992 struct _pcie_device *pcie_device)
9994 unsigned long flags;
9996 spin_lock_irqsave(&ioc->pcie_device_lock, flags);
9998 if (!list_empty(&pcie_device->list)) {
9999 list_del_init(&pcie_device->list);
10000 pcie_device_put(pcie_device);
10002 pcie_device_get(pcie_device);
10003 list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
10005 spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
10009 * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
10010 * @ioc: per adapter object
10012 * Called during initial loading of the driver.
10014 static void
10015 _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
10017 struct _pcie_device *pcie_device;
10018 int rc;
10020 /* PCIe Device List */
10021 while ((pcie_device = get_next_pcie_device(ioc))) {
10022 if (pcie_device->starget) {
10023 pcie_device_put(pcie_device);
10024 continue;
10026 rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
10027 pcie_device->id, 0);
10028 if (rc) {
10029 _scsih_pcie_device_remove(ioc, pcie_device);
10030 pcie_device_put(pcie_device);
10031 continue;
10032 } else if (!pcie_device->starget) {
10033 /*
10034 * When async scanning is enabled, it's not possible to
10035 * remove devices while scanning is turned on due to an
10036 * oops in scsi_sysfs_add_sdev()->add_device()->
10037 * sysfs_addrm_start()
10038 */
10039 if (!ioc->is_driver_loading) {
10040 /* TODO-- Need to find out whether this condition will
10041 * occur or not
10043 _scsih_pcie_device_remove(ioc, pcie_device);
10044 pcie_device_put(pcie_device);
10045 continue;
10048 pcie_device_make_active(ioc, pcie_device);
10049 pcie_device_put(pcie_device);
10054 * _scsih_probe_devices - probing for devices
10055 * @ioc: per adapter object
10057 * Called during initial loading of the driver.
10059 static void
10060 _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
10062 u16 volume_mapping_flags;
10064 if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
10065 return; /* return when IOC doesn't support initiator mode */
10067 _scsih_probe_boot_devices(ioc);
10069 if (ioc->ir_firmware) {
10070 volume_mapping_flags =
10071 le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
10072 MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
10073 if (volume_mapping_flags ==
10074 MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
10075 _scsih_probe_raid(ioc);
10076 _scsih_probe_sas(ioc);
10077 } else {
10078 _scsih_probe_sas(ioc);
10079 _scsih_probe_raid(ioc);
10081 } else {
10082 _scsih_probe_sas(ioc);
10083 _scsih_probe_pcie(ioc);
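/*
 * When IR firmware is present the order in which RAID volumes and
 * bare SAS devices are reported to scsi-ml follows the IR volume
 * mapping mode from IOC page 8: with low volume mapping the volumes
 * are reported first, otherwise the SAS devices go first.  PCIe
 * (NVMe) end devices are reported in the non-IR branch here.
 */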
10088 * scsih_scan_start - scsi lld callback for .scan_start
10089 * @shost: SCSI host pointer
10091 * The shost has the ability to discover targets on its own instead
10092 * of scanning the entire bus. In our implementation, we will kick off
10093 * firmware discovery.
10095 static void
10096 scsih_scan_start(struct Scsi_Host *shost)
10098 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10099 int rc;
10100 if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
10101 mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
10103 if (disable_discovery > 0)
10104 return;
10106 ioc->start_scan = 1;
10107 rc = mpt3sas_port_enable(ioc);
10109 if (rc != 0)
10110 ioc_info(ioc, "port enable: FAILED\n");
10114 * scsih_scan_finished - scsi lld callback for .scan_finished
10115 * @shost: SCSI host pointer
10116 * @time: elapsed time of the scan in jiffies
10118 * This function will be called periodically until it returns 1 with the
10119 * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
10120 * we wait for firmware discovery to complete, then return 1.
10122 static int
10123 scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
10125 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10127 if (disable_discovery > 0) {
10128 ioc->is_driver_loading = 0;
10129 ioc->wait_for_discovery_to_complete = 0;
10130 return 1;
10133 if (time >= (300 * HZ)) {
10134 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10135 ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
10136 ioc->is_driver_loading = 0;
10137 return 1;
10140 if (ioc->start_scan)
10141 return 0;
10143 if (ioc->start_scan_failed) {
10144 ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
10145 ioc->start_scan_failed);
10146 ioc->is_driver_loading = 0;
10147 ioc->wait_for_discovery_to_complete = 0;
10148 ioc->remove_host = 1;
10149 return 1;
10152 ioc_info(ioc, "port enable: SUCCESS\n");
10153 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
10155 if (ioc->wait_for_discovery_to_complete) {
10156 ioc->wait_for_discovery_to_complete = 0;
10157 _scsih_probe_devices(ioc);
10159 mpt3sas_base_start_watchdog(ioc);
10160 ioc->is_driver_loading = 0;
10161 return 1;
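/*
 * Asynchronous scan contract: scsih_scan_start() kicks off firmware
 * discovery by issuing a port enable, and the SCSI midlayer then
 * polls scsih_scan_finished() until it returns 1.  The scan is
 * declared finished on success (port enable completed and the
 * discovered devices probed), on a port enable failure, or after a
 * 300 second timeout; only then is the watchdog started and
 * is_driver_loading cleared.
 */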
10164 /* shost template for SAS 2.0 HBA devices */
10165 static struct scsi_host_template mpt2sas_driver_template = {
10166 .module = THIS_MODULE,
10167 .name = "Fusion MPT SAS Host",
10168 .proc_name = MPT2SAS_DRIVER_NAME,
10169 .queuecommand = scsih_qcmd,
10170 .target_alloc = scsih_target_alloc,
10171 .slave_alloc = scsih_slave_alloc,
10172 .slave_configure = scsih_slave_configure,
10173 .target_destroy = scsih_target_destroy,
10174 .slave_destroy = scsih_slave_destroy,
10175 .scan_finished = scsih_scan_finished,
10176 .scan_start = scsih_scan_start,
10177 .change_queue_depth = scsih_change_queue_depth,
10178 .eh_abort_handler = scsih_abort,
10179 .eh_device_reset_handler = scsih_dev_reset,
10180 .eh_target_reset_handler = scsih_target_reset,
10181 .eh_host_reset_handler = scsih_host_reset,
10182 .bios_param = scsih_bios_param,
10183 .can_queue = 1,
10184 .this_id = -1,
10185 .sg_tablesize = MPT2SAS_SG_DEPTH,
10186 .max_sectors = 32767,
10187 .cmd_per_lun = 7,
10188 .shost_attrs = mpt3sas_host_attrs,
10189 .sdev_attrs = mpt3sas_dev_attrs,
10190 .track_queue_depth = 1,
10191 .cmd_size = sizeof(struct scsiio_tracker),
10194 /* raid transport support for SAS 2.0 HBA devices */
10195 static struct raid_function_template mpt2sas_raid_functions = {
10196 .cookie = &mpt2sas_driver_template,
10197 .is_raid = scsih_is_raid,
10198 .get_resync = scsih_get_resync,
10199 .get_state = scsih_get_state,
10202 /* shost template for SAS 3.0 HBA devices */
10203 static struct scsi_host_template mpt3sas_driver_template = {
10204 .module = THIS_MODULE,
10205 .name = "Fusion MPT SAS Host",
10206 .proc_name = MPT3SAS_DRIVER_NAME,
10207 .queuecommand = scsih_qcmd,
10208 .target_alloc = scsih_target_alloc,
10209 .slave_alloc = scsih_slave_alloc,
10210 .slave_configure = scsih_slave_configure,
10211 .target_destroy = scsih_target_destroy,
10212 .slave_destroy = scsih_slave_destroy,
10213 .scan_finished = scsih_scan_finished,
10214 .scan_start = scsih_scan_start,
10215 .change_queue_depth = scsih_change_queue_depth,
10216 .eh_abort_handler = scsih_abort,
10217 .eh_device_reset_handler = scsih_dev_reset,
10218 .eh_target_reset_handler = scsih_target_reset,
10219 .eh_host_reset_handler = scsih_host_reset,
10220 .bios_param = scsih_bios_param,
10221 .can_queue = 1,
10222 .this_id = -1,
10223 .sg_tablesize = MPT3SAS_SG_DEPTH,
10224 .max_sectors = 32767,
10225 .cmd_per_lun = 7,
10226 .shost_attrs = mpt3sas_host_attrs,
10227 .sdev_attrs = mpt3sas_dev_attrs,
10228 .track_queue_depth = 1,
10229 .cmd_size = sizeof(struct scsiio_tracker),
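/*
 * Both host templates above advertise can_queue = 1 only as a
 * placeholder; the real queue depth is not known until the IOC facts
 * have been read, and it is presumably raised to the firmware
 * reported depth during mpt3sas_base_attach() before the host is
 * scanned.  max_sectors is likewise adjusted per HBA type in
 * _scsih_probe() below (see the mcpu endpoint handling).
 */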
10232 /* raid transport support for SAS 3.0 HBA devices */
10233 static struct raid_function_template mpt3sas_raid_functions = {
10234 .cookie = &mpt3sas_driver_template,
10235 .is_raid = scsih_is_raid,
10236 .get_resync = scsih_get_resync,
10237 .get_state = scsih_get_state,
10241 * _scsih_determine_hba_mpi_version - determine in which MPI version class
10242 * this device belongs to.
10243 * @pdev: PCI device struct
10245 * return MPI2_VERSION for SAS 2.0 HBA devices,
10246 * MPI25_VERSION for SAS 3.0 HBA devices, and
10247 * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices
10249 static u16
10250 _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
10253 switch (pdev->device) {
10254 case MPI2_MFGPAGE_DEVID_SSS6200:
10255 case MPI2_MFGPAGE_DEVID_SAS2004:
10256 case MPI2_MFGPAGE_DEVID_SAS2008:
10257 case MPI2_MFGPAGE_DEVID_SAS2108_1:
10258 case MPI2_MFGPAGE_DEVID_SAS2108_2:
10259 case MPI2_MFGPAGE_DEVID_SAS2108_3:
10260 case MPI2_MFGPAGE_DEVID_SAS2116_1:
10261 case MPI2_MFGPAGE_DEVID_SAS2116_2:
10262 case MPI2_MFGPAGE_DEVID_SAS2208_1:
10263 case MPI2_MFGPAGE_DEVID_SAS2208_2:
10264 case MPI2_MFGPAGE_DEVID_SAS2208_3:
10265 case MPI2_MFGPAGE_DEVID_SAS2208_4:
10266 case MPI2_MFGPAGE_DEVID_SAS2208_5:
10267 case MPI2_MFGPAGE_DEVID_SAS2208_6:
10268 case MPI2_MFGPAGE_DEVID_SAS2308_1:
10269 case MPI2_MFGPAGE_DEVID_SAS2308_2:
10270 case MPI2_MFGPAGE_DEVID_SAS2308_3:
10271 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10272 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10273 return MPI2_VERSION;
10274 case MPI25_MFGPAGE_DEVID_SAS3004:
10275 case MPI25_MFGPAGE_DEVID_SAS3008:
10276 case MPI25_MFGPAGE_DEVID_SAS3108_1:
10277 case MPI25_MFGPAGE_DEVID_SAS3108_2:
10278 case MPI25_MFGPAGE_DEVID_SAS3108_5:
10279 case MPI25_MFGPAGE_DEVID_SAS3108_6:
10280 return MPI25_VERSION;
10281 case MPI26_MFGPAGE_DEVID_SAS3216:
10282 case MPI26_MFGPAGE_DEVID_SAS3224:
10283 case MPI26_MFGPAGE_DEVID_SAS3316_1:
10284 case MPI26_MFGPAGE_DEVID_SAS3316_2:
10285 case MPI26_MFGPAGE_DEVID_SAS3316_3:
10286 case MPI26_MFGPAGE_DEVID_SAS3316_4:
10287 case MPI26_MFGPAGE_DEVID_SAS3324_1:
10288 case MPI26_MFGPAGE_DEVID_SAS3324_2:
10289 case MPI26_MFGPAGE_DEVID_SAS3324_3:
10290 case MPI26_MFGPAGE_DEVID_SAS3324_4:
10291 case MPI26_MFGPAGE_DEVID_SAS3508:
10292 case MPI26_MFGPAGE_DEVID_SAS3508_1:
10293 case MPI26_MFGPAGE_DEVID_SAS3408:
10294 case MPI26_MFGPAGE_DEVID_SAS3516:
10295 case MPI26_MFGPAGE_DEVID_SAS3516_1:
10296 case MPI26_MFGPAGE_DEVID_SAS3416:
10297 case MPI26_MFGPAGE_DEVID_SAS3616:
10298 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10299 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10300 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10301 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10302 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10303 return MPI26_VERSION;
10305 return 0;
10309 * _scsih_probe - attach and add scsi host
10310 * @pdev: PCI device struct
10311 * @id: pci device id
10313 * Return: 0 success, anything else error.
10315 static int
10316 _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10318 struct MPT3SAS_ADAPTER *ioc;
10319 struct Scsi_Host *shost = NULL;
10320 int rv;
10321 u16 hba_mpi_version;
10323 /* Determine in which MPI version class this pci device belongs */
10324 hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
10325 if (hba_mpi_version == 0)
10326 return -ENODEV;
10328 /* Enumerate only SAS 2.0 HBAs if hbas_to_enumerate is one;
10329 * for HBAs of other generations return -ENODEV.
10331 if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
10332 return -ENODEV;
10334 /* Enumerate only SAS 3.0 HBAs if hbas_to_enumerate is two;
10335 * for HBAs of other generations return -ENODEV.
10337 if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
10338 || hba_mpi_version == MPI26_VERSION)))
10339 return -ENODEV;
10341 switch (hba_mpi_version) {
10342 case MPI2_VERSION:
10343 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
10344 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
10345 /* Use mpt2sas driver host template for SAS 2.0 HBA's */
10346 shost = scsi_host_alloc(&mpt2sas_driver_template,
10347 sizeof(struct MPT3SAS_ADAPTER));
10348 if (!shost)
10349 return -ENODEV;
10350 ioc = shost_priv(shost);
10351 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10352 ioc->hba_mpi_version_belonged = hba_mpi_version;
10353 ioc->id = mpt2_ids++;
10354 sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
10355 switch (pdev->device) {
10356 case MPI2_MFGPAGE_DEVID_SSS6200:
10357 ioc->is_warpdrive = 1;
10358 ioc->hide_ir_msg = 1;
10359 break;
10360 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP:
10361 case MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1:
10362 ioc->is_mcpu_endpoint = 1;
10363 break;
10364 default:
10365 ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
10366 break;
10368 break;
10369 case MPI25_VERSION:
10370 case MPI26_VERSION:
10371 /* Use mpt3sas driver host template for SAS 3.0 HBA's */
10372 shost = scsi_host_alloc(&mpt3sas_driver_template,
10373 sizeof(struct MPT3SAS_ADAPTER));
10374 if (!shost)
10375 return -ENODEV;
10376 ioc = shost_priv(shost);
10377 memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
10378 ioc->hba_mpi_version_belonged = hba_mpi_version;
10379 ioc->id = mpt3_ids++;
10380 sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
10381 switch (pdev->device) {
10382 case MPI26_MFGPAGE_DEVID_SAS3508:
10383 case MPI26_MFGPAGE_DEVID_SAS3508_1:
10384 case MPI26_MFGPAGE_DEVID_SAS3408:
10385 case MPI26_MFGPAGE_DEVID_SAS3516:
10386 case MPI26_MFGPAGE_DEVID_SAS3516_1:
10387 case MPI26_MFGPAGE_DEVID_SAS3416:
10388 case MPI26_MFGPAGE_DEVID_SAS3616:
10389 case MPI26_ATLAS_PCIe_SWITCH_DEVID:
10390 ioc->is_gen35_ioc = 1;
10391 break;
10392 case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
10393 case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
10394 dev_info(&pdev->dev,
10395 "HBA is in Configurable Secure mode\n");
10396 /* fall through */
10397 case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
10398 case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
10399 ioc->is_aero_ioc = ioc->is_gen35_ioc = 1;
10400 break;
10401 default:
10402 ioc->is_gen35_ioc = ioc->is_aero_ioc = 0;
10404 if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
10405 pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
10406 (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
10407 ioc->combined_reply_queue = 1;
10408 if (ioc->is_gen35_ioc)
10409 ioc->combined_reply_index_count =
10410 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
10411 else
10412 ioc->combined_reply_index_count =
10413 MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
10415 break;
10416 default:
10417 return -ENODEV;
10420 INIT_LIST_HEAD(&ioc->list);
10421 spin_lock(&gioc_lock);
10422 list_add_tail(&ioc->list, &mpt3sas_ioc_list);
10423 spin_unlock(&gioc_lock);
10424 ioc->shost = shost;
10425 ioc->pdev = pdev;
10426 ioc->scsi_io_cb_idx = scsi_io_cb_idx;
10427 ioc->tm_cb_idx = tm_cb_idx;
10428 ioc->ctl_cb_idx = ctl_cb_idx;
10429 ioc->base_cb_idx = base_cb_idx;
10430 ioc->port_enable_cb_idx = port_enable_cb_idx;
10431 ioc->transport_cb_idx = transport_cb_idx;
10432 ioc->scsih_cb_idx = scsih_cb_idx;
10433 ioc->config_cb_idx = config_cb_idx;
10434 ioc->tm_tr_cb_idx = tm_tr_cb_idx;
10435 ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
10436 ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
10437 ioc->logging_level = logging_level;
10438 ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
10439 /* misc semaphores and spin locks */
10440 mutex_init(&ioc->reset_in_progress_mutex);
10441 /* initializing pci_access_mutex lock */
10442 mutex_init(&ioc->pci_access_mutex);
10443 spin_lock_init(&ioc->ioc_reset_in_progress_lock);
10444 spin_lock_init(&ioc->scsi_lookup_lock);
10445 spin_lock_init(&ioc->sas_device_lock);
10446 spin_lock_init(&ioc->sas_node_lock);
10447 spin_lock_init(&ioc->fw_event_lock);
10448 spin_lock_init(&ioc->raid_device_lock);
10449 spin_lock_init(&ioc->pcie_device_lock);
10450 spin_lock_init(&ioc->diag_trigger_lock);
10452 INIT_LIST_HEAD(&ioc->sas_device_list);
10453 INIT_LIST_HEAD(&ioc->sas_device_init_list);
10454 INIT_LIST_HEAD(&ioc->sas_expander_list);
10455 INIT_LIST_HEAD(&ioc->enclosure_list);
10456 INIT_LIST_HEAD(&ioc->pcie_device_list);
10457 INIT_LIST_HEAD(&ioc->pcie_device_init_list);
10458 INIT_LIST_HEAD(&ioc->fw_event_list);
10459 INIT_LIST_HEAD(&ioc->raid_device_list);
10460 INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
10461 INIT_LIST_HEAD(&ioc->delayed_tr_list);
10462 INIT_LIST_HEAD(&ioc->delayed_sc_list);
10463 INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
10464 INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
10465 INIT_LIST_HEAD(&ioc->reply_queue_list);
10467 sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
10469 /* init shost parameters */
10470 shost->max_cmd_len = 32;
10471 shost->max_lun = max_lun;
10472 shost->transportt = mpt3sas_transport_template;
10473 shost->unique_id = ioc->id;
10475 if (ioc->is_mcpu_endpoint) {
10476 /* mCPU MPI support 64K max IO */
10477 shost->max_sectors = 128;
10478 ioc_info(ioc, "The max_sectors value is set to %d\n",
10479 shost->max_sectors);
10480 } else {
10481 if (max_sectors != 0xFFFF) {
10482 if (max_sectors < 64) {
10483 shost->max_sectors = 64;
10484 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
10485 max_sectors);
10486 } else if (max_sectors > 32767) {
10487 shost->max_sectors = 32767;
10488 ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
10489 max_sectors);
10490 } else {
10491 shost->max_sectors = max_sectors & 0xFFFE;
10492 ioc_info(ioc, "The max_sectors value is set to %d\n",
10493 shost->max_sectors);
10497 /* register EEDP capabilities with SCSI layer */
10498 if (prot_mask > 0)
10499 scsi_host_set_prot(shost, prot_mask);
10500 else
10501 scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
10502 | SHOST_DIF_TYPE2_PROTECTION
10503 | SHOST_DIF_TYPE3_PROTECTION);
10505 scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
10507 /* event thread */
10508 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
10509 "fw_event_%s%d", ioc->driver_name, ioc->id);
10510 ioc->firmware_event_thread = alloc_ordered_workqueue(
10511 ioc->firmware_event_name, 0);
10512 if (!ioc->firmware_event_thread) {
10513 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10514 __FILE__, __LINE__, __func__);
10515 rv = -ENODEV;
10516 goto out_thread_fail;
10519 ioc->is_driver_loading = 1;
10520 if ((mpt3sas_base_attach(ioc))) {
10521 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10522 __FILE__, __LINE__, __func__);
10523 rv = -ENODEV;
10524 goto out_attach_fail;
10527 if (ioc->is_warpdrive) {
10528 if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
10529 ioc->hide_drives = 0;
10530 else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
10531 ioc->hide_drives = 1;
10532 else {
10533 if (mpt3sas_get_num_volumes(ioc))
10534 ioc->hide_drives = 1;
10535 else
10536 ioc->hide_drives = 0;
10538 } else
10539 ioc->hide_drives = 0;
10541 rv = scsi_add_host(shost, &pdev->dev);
10542 if (rv) {
10543 ioc_err(ioc, "failure at %s:%d/%s()!\n",
10544 __FILE__, __LINE__, __func__);
10545 goto out_add_shost_fail;
10548 scsi_scan_host(shost);
10549 return 0;
10550 out_add_shost_fail:
10551 mpt3sas_base_detach(ioc);
10552 out_attach_fail:
10553 destroy_workqueue(ioc->firmware_event_thread);
10554 out_thread_fail:
10555 spin_lock(&gioc_lock);
10556 list_del(&ioc->list);
10557 spin_unlock(&gioc_lock);
10558 scsi_host_put(shost);
10559 return rv;
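/*
 * The error labels above unwind in strict reverse order of setup:
 * out_add_shost_fail detaches the base driver, out_attach_fail
 * destroys the firmware-event workqueue, and out_thread_fail removes
 * the adapter from the global list and drops the Scsi_Host reference
 * taken by scsi_host_alloc().
 */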
10562 #ifdef CONFIG_PM
10564 * scsih_suspend - power management suspend main entry point
10565 * @pdev: PCI device struct
10566 * @state: PM state change to (usually PCI_D3)
10568 * Return: 0 success, anything else error.
10570 static int
10571 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
10573 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10574 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10575 pci_power_t device_state;
10577 mpt3sas_base_stop_watchdog(ioc);
10578 flush_scheduled_work();
10579 scsi_block_requests(shost);
10580 device_state = pci_choose_state(pdev, state);
10581 ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
10582 pdev, pci_name(pdev), device_state);
10584 pci_save_state(pdev);
10585 mpt3sas_base_free_resources(ioc);
10586 pci_set_power_state(pdev, device_state);
10587 return 0;
10591 * scsih_resume - power management resume main entry point
10592 * @pdev: PCI device struct
10594 * Return: 0 success, anything else error.
10596 static int
10597 scsih_resume(struct pci_dev *pdev)
10599 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10600 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10601 pci_power_t device_state = pdev->current_state;
10602 int r;
10604 ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
10605 pdev, pci_name(pdev), device_state);
10607 pci_set_power_state(pdev, PCI_D0);
10608 pci_enable_wake(pdev, PCI_D0, 0);
10609 pci_restore_state(pdev);
10610 ioc->pdev = pdev;
10611 r = mpt3sas_base_map_resources(ioc);
10612 if (r)
10613 return r;
10615 mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
10616 scsi_unblock_requests(shost);
10617 mpt3sas_base_start_watchdog(ioc);
10618 return 0;
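/*
 * These legacy PCI PM callbacks bracket a full loss of adapter state:
 * suspend saves PCI config space and frees the mapped resources, and
 * resume re-maps them and issues a SOFT_RESET through
 * mpt3sas_base_hard_reset_handler() to bring the firmware back to an
 * operational state before unblocking requests and restarting the
 * watchdog.
 */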
10620 #endif /* CONFIG_PM */
10623 * scsih_pci_error_detected - Called when a PCI error is detected.
10624 * @pdev: PCI device struct
10625 * @state: PCI channel state
10627 * Description: Called when a PCI error is detected.
10629 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
10631 static pci_ers_result_t
10632 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
10634 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10635 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10637 ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
10639 switch (state) {
10640 case pci_channel_io_normal:
10641 return PCI_ERS_RESULT_CAN_RECOVER;
10642 case pci_channel_io_frozen:
10643 /* Fatal error, prepare for slot reset */
10644 ioc->pci_error_recovery = 1;
10645 scsi_block_requests(ioc->shost);
10646 mpt3sas_base_stop_watchdog(ioc);
10647 mpt3sas_base_free_resources(ioc);
10648 return PCI_ERS_RESULT_NEED_RESET;
10649 case pci_channel_io_perm_failure:
10650 /* Permanent error, prepare for device removal */
10651 ioc->pci_error_recovery = 1;
10652 mpt3sas_base_stop_watchdog(ioc);
10653 _scsih_flush_running_cmds(ioc);
10654 return PCI_ERS_RESULT_DISCONNECT;
10656 return PCI_ERS_RESULT_NEED_RESET;
10660 * scsih_pci_slot_reset - Called when PCI slot has been reset.
10661 * @pdev: PCI device struct
10663 * Description: This routine is called by the pci error recovery
10664 * code after the PCI slot has been reset, just before we
10665 * should resume normal operations.
10667 static pci_ers_result_t
10668 scsih_pci_slot_reset(struct pci_dev *pdev)
10670 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10671 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10672 int rc;
10674 ioc_info(ioc, "PCI error: slot reset callback!!\n");
10676 ioc->pci_error_recovery = 0;
10677 ioc->pdev = pdev;
10678 pci_restore_state(pdev);
10679 rc = mpt3sas_base_map_resources(ioc);
10680 if (rc)
10681 return PCI_ERS_RESULT_DISCONNECT;
10683 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
10685 ioc_warn(ioc, "hard reset: %s\n",
10686 (rc == 0) ? "success" : "failed");
10688 if (!rc)
10689 return PCI_ERS_RESULT_RECOVERED;
10690 else
10691 return PCI_ERS_RESULT_DISCONNECT;
10695 * scsih_pci_resume() - resume normal ops after PCI reset
10696 * @pdev: pointer to PCI device
10698 * Called when the error recovery driver tells us that it's
10699 * OK to resume normal operation. Use completion to allow
10700 * halted scsi ops to resume.
10702 static void
10703 scsih_pci_resume(struct pci_dev *pdev)
10705 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10706 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10708 ioc_info(ioc, "PCI error: resume callback!!\n");
10710 mpt3sas_base_start_watchdog(ioc);
10711 scsi_unblock_requests(ioc->shost);
10715 * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
10716 * @pdev: pointer to PCI device
10718 static pci_ers_result_t
10719 scsih_pci_mmio_enabled(struct pci_dev *pdev)
10721 struct Scsi_Host *shost = pci_get_drvdata(pdev);
10722 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
10724 ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
10726 /* TODO - dump whatever for debugging purposes */
10728 /* This called only if scsih_pci_error_detected returns
10729 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
10730 * works, no need to reset slot.
10732 return PCI_ERS_RESULT_RECOVERED;
10736 * scsih_ncq_prio_supp - Check for NCQ command priority support
10737 * @sdev: scsi device struct
10739 * This is called when a user indicates they would like to enable
10740 * ncq command priorities. This works only on SATA devices.
10742 bool scsih_ncq_prio_supp(struct scsi_device *sdev)
10744 unsigned char *buf;
10745 bool ncq_prio_supp = false;
10747 if (!scsi_device_supports_vpd(sdev))
10748 return ncq_prio_supp;
10750 buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
10751 if (!buf)
10752 return ncq_prio_supp;
10754 if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
10755 ncq_prio_supp = (buf[213] >> 4) & 1;
10757 kfree(buf);
10758 return ncq_prio_supp;
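/*
 * The VPD page read above is the ATA Information page (0x89), which
 * embeds the device's IDENTIFY data.  Byte 213 bit 4 appears to
 * correspond to the NCQ priority bit of the SATA capabilities
 * identify word, so the helper reports true only for SATA devices
 * that advertise NCQ command priority.
 */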
10761 * The pci device ids are defined in mpi/mpi2_cnfg.h.
10763 static const struct pci_device_id mpt3sas_pci_table[] = {
10764 /* Spitfire ~ 2004 */
10765 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
10766 PCI_ANY_ID, PCI_ANY_ID },
10767 /* Falcon ~ 2008 */
10768 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
10769 PCI_ANY_ID, PCI_ANY_ID },
10770 /* Liberator ~ 2108 */
10771 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
10772 PCI_ANY_ID, PCI_ANY_ID },
10773 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
10774 PCI_ANY_ID, PCI_ANY_ID },
10775 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
10776 PCI_ANY_ID, PCI_ANY_ID },
10777 /* Meteor ~ 2116 */
10778 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
10779 PCI_ANY_ID, PCI_ANY_ID },
10780 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
10781 PCI_ANY_ID, PCI_ANY_ID },
10782 /* Thunderbolt ~ 2208 */
10783 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
10784 PCI_ANY_ID, PCI_ANY_ID },
10785 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
10786 PCI_ANY_ID, PCI_ANY_ID },
10787 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
10788 PCI_ANY_ID, PCI_ANY_ID },
10789 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
10790 PCI_ANY_ID, PCI_ANY_ID },
10791 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
10792 PCI_ANY_ID, PCI_ANY_ID },
10793 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
10794 PCI_ANY_ID, PCI_ANY_ID },
10795 /* Mustang ~ 2308 */
10796 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
10797 PCI_ANY_ID, PCI_ANY_ID },
10798 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
10799 PCI_ANY_ID, PCI_ANY_ID },
10800 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
10801 PCI_ANY_ID, PCI_ANY_ID },
10802 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP,
10803 PCI_ANY_ID, PCI_ANY_ID },
10804 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SWITCH_MPI_EP_1,
10805 PCI_ANY_ID, PCI_ANY_ID },
10806 /* SSS6200 */
10807 { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
10808 PCI_ANY_ID, PCI_ANY_ID },
10809 /* Fury ~ 3004 and 3008 */
10810 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
10811 PCI_ANY_ID, PCI_ANY_ID },
10812 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
10813 PCI_ANY_ID, PCI_ANY_ID },
10814 /* Invader ~ 3108 */
10815 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
10816 PCI_ANY_ID, PCI_ANY_ID },
10817 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
10818 PCI_ANY_ID, PCI_ANY_ID },
10819 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
10820 PCI_ANY_ID, PCI_ANY_ID },
10821 { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
10822 PCI_ANY_ID, PCI_ANY_ID },
10823 /* Cutlass ~ 3216 and 3224 */
10824 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
10825 PCI_ANY_ID, PCI_ANY_ID },
10826 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
10827 PCI_ANY_ID, PCI_ANY_ID },
10828 /* Intruder ~ 3316 and 3324 */
10829 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
10830 PCI_ANY_ID, PCI_ANY_ID },
10831 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
10832 PCI_ANY_ID, PCI_ANY_ID },
10833 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
10834 PCI_ANY_ID, PCI_ANY_ID },
10835 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
10836 PCI_ANY_ID, PCI_ANY_ID },
10837 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
10838 PCI_ANY_ID, PCI_ANY_ID },
10839 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
10840 PCI_ANY_ID, PCI_ANY_ID },
10841 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
10842 PCI_ANY_ID, PCI_ANY_ID },
10843 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
10844 PCI_ANY_ID, PCI_ANY_ID },
10845 /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
10846 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
10847 PCI_ANY_ID, PCI_ANY_ID },
10848 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
10849 PCI_ANY_ID, PCI_ANY_ID },
10850 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
10851 PCI_ANY_ID, PCI_ANY_ID },
10852 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
10853 PCI_ANY_ID, PCI_ANY_ID },
10854 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
10855 PCI_ANY_ID, PCI_ANY_ID },
10856 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
10857 PCI_ANY_ID, PCI_ANY_ID },
10858 /* Mercator ~ 3616*/
10859 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
10860 PCI_ANY_ID, PCI_ANY_ID },
10862 /* Aero SI 0x00E1 Configurable Secure
10863 * 0x00E2 Hard Secure
10865 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3916,
10866 PCI_ANY_ID, PCI_ANY_ID },
10867 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
10868 PCI_ANY_ID, PCI_ANY_ID },
10870 /* Atlas PCIe Switch Management Port */
10871 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
10872 PCI_ANY_ID, PCI_ANY_ID },
10874 /* Sea SI 0x00E5 Configurable Secure
10875 * 0x00E6 Hard Secure
10877 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_CFG_SEC_3816,
10878 PCI_ANY_ID, PCI_ANY_ID },
10879 { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
10880 PCI_ANY_ID, PCI_ANY_ID },
10882 {0} /* Terminating entry */
10884 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
10886 static struct pci_error_handlers _mpt3sas_err_handler = {
10887 .error_detected = scsih_pci_error_detected,
10888 .mmio_enabled = scsih_pci_mmio_enabled,
10889 .slot_reset = scsih_pci_slot_reset,
10890 .resume = scsih_pci_resume,
10893 static struct pci_driver mpt3sas_driver = {
10894 .name = MPT3SAS_DRIVER_NAME,
10895 .id_table = mpt3sas_pci_table,
10896 .probe = _scsih_probe,
10897 .remove = scsih_remove,
10898 .shutdown = scsih_shutdown,
10899 .err_handler = &_mpt3sas_err_handler,
10900 #ifdef CONFIG_PM
10901 .suspend = scsih_suspend,
10902 .resume = scsih_resume,
10903 #endif
10907 * scsih_init - main entry point for this driver.
10909 * Return: 0 success, anything else error.
10911 static int
10912 scsih_init(void)
10914 mpt2_ids = 0;
10915 mpt3_ids = 0;
10917 mpt3sas_base_initialize_callback_handler();
10919 /* queuecommand callback handler */
10920 scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
10922 /* task management callback handler */
10923 tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
10925 /* base internal commands callback handler */
10926 base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
10927 port_enable_cb_idx = mpt3sas_base_register_callback_handler(
10928 mpt3sas_port_enable_done);
10930 /* transport internal commands callback handler */
10931 transport_cb_idx = mpt3sas_base_register_callback_handler(
10932 mpt3sas_transport_done);
10934 /* scsih internal commands callback handler */
10935 scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
10937 /* configuration page API internal commands callback handler */
10938 config_cb_idx = mpt3sas_base_register_callback_handler(
10939 mpt3sas_config_done);
10941 /* ctl module callback handler */
10942 ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
10944 tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
10945 _scsih_tm_tr_complete);
10947 tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
10948 _scsih_tm_volume_tr_complete);
10950 tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
10951 _scsih_sas_control_complete);
10953 return 0;
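/*
 * Each internal command class gets its own callback handler index
 * here; the index is recorded against the smid when a request is sent
 * so that the base driver's interrupt path can route the reply to the
 * matching done routine.  A minimal sketch of how an index is used
 * (following the pattern of _scsih_ir_shutdown() above):
 *
 *	smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
 *	... build the request frame for this smid ...
 *	mpt3sas_base_put_smid_default(ioc, smid);
 *	// the reply completes ioc->scsih_cmds.done via _scsih_done()
 */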
10957 * scsih_exit - exit point for this driver (when it is a module).
10961 static void
10962 scsih_exit(void)
10965 mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
10966 mpt3sas_base_release_callback_handler(tm_cb_idx);
10967 mpt3sas_base_release_callback_handler(base_cb_idx);
10968 mpt3sas_base_release_callback_handler(port_enable_cb_idx);
10969 mpt3sas_base_release_callback_handler(transport_cb_idx);
10970 mpt3sas_base_release_callback_handler(scsih_cb_idx);
10971 mpt3sas_base_release_callback_handler(config_cb_idx);
10972 mpt3sas_base_release_callback_handler(ctl_cb_idx);
10974 mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
10975 mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
10976 mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
10978 /* raid transport support */
10979 if (hbas_to_enumerate != 1)
10980 raid_class_release(mpt3sas_raid_template);
10981 if (hbas_to_enumerate != 2)
10982 raid_class_release(mpt2sas_raid_template);
10983 sas_release_transport(mpt3sas_transport_template);
10987 * _mpt3sas_init - main entry point for this driver.
10989 * Return: 0 success, anything else error.
10991 static int __init
10992 _mpt3sas_init(void)
10994 int error;
10996 pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
10997 MPT3SAS_DRIVER_VERSION);
10999 mpt3sas_transport_template =
11000 sas_attach_transport(&mpt3sas_transport_functions);
11001 if (!mpt3sas_transport_template)
11002 return -ENODEV;
11004 /* No need to attach the mpt3sas raid functions template
11005 * if hbas_to_enumerate is one.
11007 if (hbas_to_enumerate != 1) {
11008 mpt3sas_raid_template =
11009 raid_class_attach(&mpt3sas_raid_functions);
11010 if (!mpt3sas_raid_template) {
11011 sas_release_transport(mpt3sas_transport_template);
11012 return -ENODEV;
11016 /* No need to attach the mpt2sas raid functions template
11017 * if hbas_to_enumerate is two.
11019 if (hbas_to_enumerate != 2) {
11020 mpt2sas_raid_template =
11021 raid_class_attach(&mpt2sas_raid_functions);
11022 if (!mpt2sas_raid_template) {
11023 sas_release_transport(mpt3sas_transport_template);
11024 return -ENODEV;
11028 error = scsih_init();
11029 if (error) {
11030 scsih_exit();
11031 return error;
11034 mpt3sas_ctl_init(hbas_to_enumerate);
11036 error = pci_register_driver(&mpt3sas_driver);
11037 if (error)
11038 scsih_exit();
11040 return error;
11044 * _mpt3sas_exit - exit point for this driver (when it is a module).
11047 static void __exit
11048 _mpt3sas_exit(void)
11050 pr_info("mpt3sas version %s unloading\n",
11051 MPT3SAS_DRIVER_VERSION);
11053 mpt3sas_ctl_exit(hbas_to_enumerate);
11055 pci_unregister_driver(&mpt3sas_driver);
11057 scsih_exit();
11060 module_init(_mpt3sas_init);
11061 module_exit(_mpt3sas_exit);