/*
 * driver for Microsemi PQI-based storage controllers
 * Copyright (c) 2016 Microsemi Corporation
 * Copyright (c) 2016 PMC-Sierra, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>

#include "smartpqi.h"
#include "smartpqi_sis.h"
#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif
#define DRIVER_VERSION		"0.9.13-370"
#define DRIVER_MAJOR		0
#define DRIVER_MINOR		9
#define DRIVER_RELEASE		13
#define DRIVER_REVISION		370

#define DRIVER_NAME		"Microsemi PQI Driver (v" DRIVER_VERSION ")"
#define DRIVER_NAME_SHORT	"smartpqi"
MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
#define PQI_ENABLE_MULTI_QUEUE_SUPPORT	0

static char *hpe_branded_controller = "HPE Smart Array Controller";
static char *microsemi_branded_controller = "Microsemi Smart Family Controller";
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);
static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");
static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "";
}
#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	scmd->scsi_done(scmd);
}
static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}
static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}
static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}
static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}
static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}
static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}
static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}
#define PQI_RESCAN_WORK_INTERVAL	(10 * HZ)

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->rescan_work,
		PQI_RESCAN_WORK_INTERVAL);
}
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}
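/*
 * Editorial note: pqi_map_single() always fills exactly one SG descriptor
 * and marks it CISS_SG_LAST, so every internal RAID-path request built in
 * this file maps its data as a single physically contiguous buffer.
 * pqi_pci_unmap() below is the matching teardown; callers in this file
 * always pass a descriptor count of 1.
 */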
static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_CACHE_FLUSH:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_CACHE_FLUSH;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
			cmd);
		WARN_ON(cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
}
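/*
 * Editorial note on the allocator below: request slots are claimed
 * lock-free.  A slot is free iff its refcount is zero, so
 * atomic_inc_return() == 1 means this thread won the slot; otherwise the
 * increment is undone and the next slot is tried.  next_io_request_slot
 * is only a starting hint for the scan, which is why updating it racily
 * is harmless.
 */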
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;

	return io_request;
}
static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}
static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
		sizeof(*buffer), 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
		&pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	int pci_direction;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
#define SA_CACHE_FLUSH_BUFFER_LENGTH	4

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;
	u8 *buffer;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
		SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

out:
	kfree(buffer);

	return rc;
}
static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	end_tag[2];
};

#pragma pack()
static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
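/*
 * Editorial note: the 8-byte BCD time payload built above is laid out as
 * hour, minute, second, 0, month, day, century, year-in-century.  For
 * example, 2016-08-09 14:05:30 encodes as 14 05 30 00 08 09 20 16.
 */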
#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}
static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}
static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}
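/*
 * Editorial note on the helper above: the LUN list is fetched twice --
 * once to learn its length and once into a buffer sized from that
 * length.  If the second read reports that the list grew in the
 * meantime (e.g. a hotplug between the two commands), the code jumps
 * back to the "again" label and re-sizes the buffer until the snapshot
 * is self-consistent.
 */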
static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}
static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}
static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
			lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}
static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (raid_map_size > sizeof(*raid_map)) {
		err_msg = "RAID map too large";
		goto bad_raid_map;
	}

	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
		err_msg = "invalid number of map entries in RAID map";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev, "%s\n", err_msg);

	return -EINVAL;
}
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
		sizeof(*raid_map), 0, &pci_direction);
	if (rc)
		goto error;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	if (rc)
		goto error;

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}
static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 offload_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
	if (rc)
		goto out;

#define OFFLOAD_STATUS_BYTE	4
#define OFFLOAD_CONFIGURED_BIT	0x1
#define OFFLOAD_ENABLED_BIT	0x2

	offload_status = buffer[OFFLOAD_STATUS_BYTE];
	device->offload_configured =
		!!(offload_status & OFFLOAD_CONFIGURED_BIT);
	if (device->offload_configured) {
		device->offload_enabled_pending =
			!!(offload_status & OFFLOAD_ENABLED_BIT);
		if (pqi_get_raid_map(ctrl_info, device))
			device->offload_enabled_pending = false;
	}

out:
	kfree(buffer);
}
/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8],
		sizeof(device->vendor));
	memcpy(device->model, &buffer[16],
		sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		pqi_get_raid_level(ctrl_info, device);
		pqi_get_offload_status(ctrl_info, device);
		pqi_get_volume_status(ctrl_info, device);
	}

out:
	kfree(buffer);

	return rc;
}
static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status =
			"Encrypted volume inaccessible - disabled on ctrl";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}
static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
	struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
			continue;
		if (pqi_is_logical_device(device))
			continue;
		if (device->aio_handle == aio_handle)
			return device;
	}

	return NULL;
}
static void pqi_update_logical_drive_queue_depth(
	struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
{
	unsigned int i;
	struct raid_map *raid_map;
	struct raid_map_disk_data *disk_data;
	struct pqi_scsi_dev *phys_disk;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;
	unsigned int queue_depth;

	logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;

	raid_map = logical_drive->raid_map;
	if (!raid_map)
		return;

	disk_data = raid_map->disk_data;
	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	queue_depth = 0;
	for (i = 0; i < num_raid_map_entries; i++) {
		phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
			disk_data[i].aio_handle);

		if (!phys_disk) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"failed to find physical disk for logical drive %016llx\n",
				get_unaligned_be64(logical_drive->scsi3addr));
			logical_drive->offload_enabled = false;
			logical_drive->offload_enabled_pending = false;
			kfree(raid_map);
			logical_drive->raid_map = NULL;
			return;
		}

		queue_depth += phys_disk->queue_depth;
	}

	logical_drive->queue_depth = queue_depth;
}
static void pqi_update_all_logical_drive_queue_depths(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
			continue;
		if (!pqi_is_logical_device(device))
			continue;
		pqi_update_logical_drive_queue_depth(ctrl_info, device);
	}
}
static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}
static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}
static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}
/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}
static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}
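/*
 * Editorial note on the identity rule above: physical devices are
 * compared by WWID, logical volumes by their 16-byte volume ID.
 * Callers have already matched on scsi3addr (see pqi_scsi_find_entry()
 * below), so this helper only decides whether the device at that
 * address has been replaced by a different one.
 */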
enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};
static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}
static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	dev_info(&ctrl_info->pci_dev->dev,
		"%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
		action,
		ctrl_info->scsi_host->host_no,
		device->bus,
		device->target,
		device->lun,
		scsi_device_type(device->devtype),
		device->vendor,
		device->model,
		pqi_raid_level_to_string(device->raid_level),
		device->offload_configured ? '+' : '-',
		device->offload_enabled_pending ? '+' : '-',
		device->expose_device ? '+' : '-',
		device->queue_depth);
}
/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->expose_device = new_device->expose_device;
	existing_device->no_uld_attach = new_device->no_uld_attach;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_configured = new_device->offload_configured;
	existing_device->offload_enabled = false;
	existing_device->offload_enabled_pending =
		new_device->offload_enabled_pending;
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}
/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}
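/*
 * Editorial note: pqi_update_device_list() below reconciles the
 * controller's view of the world with the driver's internal list in two
 * phases -- mark/match/update while holding the device list spinlock,
 * then the slow work (removing, adding, and notifying the SCSI
 * midlayer) after the lock is dropped.
 */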
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	struct list_head add_list;
	struct list_head delete_list;

	INIT_LIST_HEAD(&add_list);
	INIT_LIST_HEAD(&delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		default:
			WARN_ON(find_result);
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	pqi_update_all_logical_drive_queue_depths(ctrl_info);

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->offload_enabled =
			device->offload_enabled_pending;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (device->expose_device && !device->sdev) {
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
				continue;
			}
		}
		pqi_dev_info(ctrl_info, "added", device);
	}
}
static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller.  If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, MSA500
		 * or similar), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}
static inline bool pqi_skip_device(u8 *scsi3addr,
	struct report_phys_lun_extended_entry *phys_lun_ext_entry)
{
	u8 device_flags;

	if (!MASKED_DEVICE(scsi3addr))
		return false;

	/* The device is masked. */

	device_flags = phys_lun_ext_entry->device_flags;

	if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
		/*
		 * It's a non-disk device.  We ignore all devices of this type
		 * when they're masked.
		 */
		return true;
	}

	return false;
}
static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	/* Expose all devices except for physical devices that are masked. */
	if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
		return false;

	return true;
}
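/*
 * Editorial note on masked devices: a masked physical non-disk is
 * skipped entirely by pqi_skip_device(), while a masked disk is kept in
 * the internal list (it may back a RAID map) but pqi_expose_device()
 * keeps it hidden from the SCSI midlayer.
 */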
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	struct list_head new_device_list_head;
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	static char *out_of_memory_msg =
		"out of memory, device discovery stopped";

	INIT_LIST_HEAD(&new_device_list_head);

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc(sizeof(*new_device_list) *
		num_new_devices, GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;

	for (i = 0; i < num_new_devices; i++) {

		if (i < num_physicals) {
			is_physical_device = true;
			phys_lun_ext_entry = &physdev_list->lun_entries[i];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[i - num_physicals];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device &&
			pqi_skip_device(scsi3addr, phys_lun_ext_entry))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		device->raid_level = SA_RAID_UNKNOWN;

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"obtaining device info failed, skipping device %016llx\n",
				get_unaligned_be64(device->scsi3addr));
			rc = 0;
			continue;
		}

		if (!pqi_is_supported_device(device))
			continue;

		pqi_assign_bus_target_lun(device);

		device->expose_device = pqi_expose_device(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			if ((phys_lun_ext_entry->device_flags &
				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle)
				device->aio_enabled = true;
		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		switch (device->devtype) {
		case TYPE_DISK:
		case TYPE_ZBC:
		case TYPE_ENCLOSURE:
			if (device->is_physical_device) {
				device->sas_address =
					get_unaligned_be64(&device->wwid);
				if (device->devtype == TYPE_DISK ||
					device->devtype == TYPE_ZBC) {
					device->aio_handle =
						phys_lun_ext_entry->aio_handle;
					pqi_get_physical_disk_info(ctrl_info,
						device, id_phys);
				}
			}
			break;
		}

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}
static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		list_del(&device->scsi_device_list_entry);
		pqi_free_device(device);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
}
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	mutex_lock(&ctrl_info->scan_mutex);

	rc = pqi_update_scsi_devices(ctrl_info);
	if (rc)
		pqi_schedule_rescan_worker(ctrl_info);

	mutex_unlock(&ctrl_info->scan_mutex);

	return rc;
}
static void pqi_scan_start(struct Scsi_Host *shost)
{
	pqi_scan_scsi_devices(shost_to_hba(shost));
}
/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}
static inline void pqi_set_encryption_info(
	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
	u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
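/*
 * Editorial worked example for the tweak math above: with a 4096-byte
 * volume block size, LBA 100 yields a tweak of (100 * 4096) / 512 = 800;
 * the tweak is always expressed in 512-byte units, and its 64 bits are
 * split across encrypt_tweak_lower/encrypt_tweak_upper.
 */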
/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */

#define PQI_RAID_BYPASS_INELIGIBLE	1

static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct raid_map *raid_map;
	bool is_write = false;
	u32 map_index;
	u64 first_block;
	u64 last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row;
	u64 last_row;
	u32 first_row_offset;
	u32 last_row_offset;
	u32 first_column;
	u32 last_column;
	u64 r0_first_row;
	u64 r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row;
	u64 r5or6_last_row;
	u32 r5or6_first_row_offset;
	u32 r5or6_last_row_offset;
	u32 r5or6_first_column;
	u32 r5or6_last_column;
	u16 data_disks_per_row;
	u32 total_disks_per_row;
	u16 layout_map_count;
	u32 stripesize;
	u16 strip_size;
	u32 first_group;
	u32 last_group;
	u32 current_group;
	u32 map_row;
	u32 aio_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_length;
	int offload_to_mirror;
	struct pqi_encryption_info *encryption_info_ptr;
	struct pqi_encryption_info encryption_info;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	/* Check for valid opcode, get LBA and block count. */
	switch (scmd->cmnd[0]) {
	case WRITE_6:
		is_write = true;
		/* fall through */
	case READ_6:
		first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
		block_cnt = (u32)scmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = true;
		/* fall through */
	case READ_10:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case WRITE_12:
		is_write = true;
		/* fall through */
	case READ_12:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	case WRITE_16:
		is_write = true;
		/* fall through */
	case READ_16:
		first_block = get_unaligned_be64(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
		break;
	default:
		/* Process via normal I/O path. */
		return PQI_RAID_BYPASS_INELIGIBLE;
	}

	/* Check for write to non-RAID-0. */
	if (is_write && device->raid_level != SA_RAID_0)
		return PQI_RAID_BYPASS_INELIGIBLE;

	if (unlikely(block_cnt == 0))
		return PQI_RAID_BYPASS_INELIGIBLE;

	last_block = first_block + block_cnt - 1;
	raid_map = device->raid_map;

	/* Check for invalid block or wraparound. */
	if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
		last_block < first_block)
		return PQI_RAID_BYPASS_INELIGIBLE;

	data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
	strip_size = get_unaligned_le16(&raid_map->strip_size);
	layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);

	/* Calculate stripe information for the request. */
	blocks_per_row = data_disks_per_row * strip_size;
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* If this isn't a single row/column then give to the controller. */
	if (first_row != last_row || first_column != last_column)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Proceeding with driver mapping. */
	total_disks_per_row = data_disks_per_row +
		get_unaligned_le16(&raid_map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
		get_unaligned_le16(&raid_map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	/* RAID 1 */
	if (device->raid_level == SA_RAID_1) {
		if (device->offload_to_mirror)
			map_index += data_disks_per_row;
		device->offload_to_mirror = !device->offload_to_mirror;
	} else if (device->raid_level == SA_RAID_ADM) {
		/* RAID ADM */
		/*
		 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
		 * divisible by 3.
		 */
		offload_to_mirror = device->offload_to_mirror;
		if (offload_to_mirror == 0)  {
			/* use physical disk in the first mirrored group. */
			map_index %= data_disks_per_row;
		} else {
			do {
				/*
				 * Determine mirror group that map_index
				 * indicates.
				 */
				current_group = map_index / data_disks_per_row;

				if (offload_to_mirror != current_group) {
					if (current_group <
						layout_map_count - 1) {
						/*
						 * Select raid index from
						 * next group.
						 */
						map_index += data_disks_per_row;
						current_group++;
					} else {
						/*
						 * Select raid index from first
						 * group.
						 */
						map_index %= data_disks_per_row;
						current_group = 0;
					}
				}
			} while (offload_to_mirror != current_group);
		}

		/* Set mirror group to use next time. */
		offload_to_mirror =
			(offload_to_mirror >= layout_map_count - 1) ?
				0 : offload_to_mirror + 1;
		WARN_ON(offload_to_mirror >= layout_map_count);
		device->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid direct use of device->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of device->layout_map_count -1.
		 */
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) && layout_map_count > 1) {
		/* RAID 50/60 */
		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row = strip_size * data_disks_per_row;
		stripesize = r5or6_blocks_per_row * layout_map_count;
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
			first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		do_div(tmpdiv, strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		do_div(tmpdiv, strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
			r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
			r5or6_blocks_per_row);

		first_column = r5or6_first_row_offset / strip_size;
		r5or6_first_column = first_column;
		r5or6_last_column = r5or6_last_row_offset / strip_size;
#endif
		if (r5or6_first_column != r5or6_last_column)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Request is eligible */
		map_row =
			((u32)(first_row >> raid_map->parity_rotation_shift)) %
			get_unaligned_le16(&raid_map->row_cnt);

		map_index = (first_group *
			(get_unaligned_le16(&raid_map->row_cnt) *
			total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
	}

	if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
		return PQI_RAID_BYPASS_INELIGIBLE;

	aio_handle = raid_map->disk_data[map_index].aio_handle;
	disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
		first_row * strip_size +
		(first_row_offset - first_column * strip_size);
	disk_block_cnt = block_cnt;

	/* Handle differing logical/physical block sizes. */
	if (raid_map->phys_blk_shift) {
		disk_block <<= raid_map->phys_blk_shift;
		disk_block_cnt <<= raid_map->phys_blk_shift;
	}

	if (unlikely(disk_block_cnt > 0xffff))
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Build the new CDB for the physical disk I/O. */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		put_unaligned_be64(disk_block, &cdb[2]);
		put_unaligned_be32(disk_block_cnt, &cdb[10]);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_length = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		put_unaligned_be32((u32)disk_block, &cdb[2]);
		cdb[6] = 0;
		put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
		cdb[9] = 0;
		cdb_length = 10;
	}

	if (get_unaligned_le16(&raid_map->flags) &
		RAID_MAP_ENCRYPTION_ENABLED) {
		pqi_set_encryption_info(&encryption_info, raid_map,
			first_block);
		encryption_info_ptr = &encryption_info;
	} else {
		encryption_info_ptr = NULL;
	}

	return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
		cdb, cdb_length, queue_group, encryption_info_ptr);
}
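/*
 * Editorial worked example for the stripe math above: with 3 data disks
 * and a strip size of 128 blocks, blocks_per_row = 384.  LBA 500 lands
 * in row 1 (500 / 384), row offset 116 (500 - 384), column 0 (116 / 128);
 * a request is bypass-eligible only when its first and last blocks fall
 * in that same row and column.
 */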
#define PQI_STATUS_IDLE		0x0

#define PQI_CREATE_ADMIN_QUEUE_PAIR	1
#define PQI_DELETE_ADMIN_QUEUE_PAIR	2

#define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0
#define PQI_DEVICE_STATE_STATUS_AVAILABLE		0x1
#define PQI_DEVICE_STATE_ALL_REGISTERS_READY		0x2
#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY	0x3
#define PQI_DEVICE_STATE_ERROR				0x4

#define PQI_MODE_READY_TIMEOUT_SECS		30
#define PQI_MODE_READY_POLL_INTERVAL_MSECS	1
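
/*
 * Wait for the controller to enter PQI mode.  Readiness is polled below
 * in three stages, all bounded by the same 30-second deadline: first the
 * PQI signature must appear in the device registers, then the
 * function-and-status code must read back as IDLE, and finally the
 * device status must report that all registers are ready.
 */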
static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	u64 signature;
	u8 status;

	pqi_registers = ctrl_info->pqi_registers;
	timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;

	while (1) {
		signature = readq(&pqi_registers->signature);
		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
			sizeof(signature)) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI signature\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI IDLE\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		if (readl(&pqi_registers->device_status) ==
			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI all registers ready\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	return 0;
}
static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
{
	struct pqi_scsi_dev *device;

	device = io_request->scmd->device->hostdata;
	device->offload_enabled = false;
}
static inline void pqi_take_device_offline(struct scsi_device *sdev)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	if (scsi_device_online(sdev)) {
		scsi_device_set_state(sdev, SDEV_OFFLINE);
		ctrl_info = shost_to_hba(sdev->host);
		schedule_delayed_work(&ctrl_info->rescan_work, 0);
		device = sdev->hostdata;
		dev_err(&ctrl_info->pci_dev->dev, "offlined scsi %d:%d:%d:%d\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun);
	}
}
static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_raid_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	struct scsi_sense_hdr sshdr;

	scmd = io_request->scmd;
	if (!scmd)
		return;

	error_info = io_request->error_info;
	scsi_status = error_info->status;
	host_byte = DID_OK;

	if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
		xfer_count =
			get_unaligned_le32(&error_info->data_out_transferred);
		residual_count = scsi_bufflen(scmd) - xfer_count;
		scsi_set_resid(scmd, residual_count);
		if (xfer_count < scmd->underflow)
			host_byte = DID_SOFT_ERROR;
	}

	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info->response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info->data))
			sense_data_length = sizeof(error_info->data);

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info->data,
				sense_data_length, &sshdr) &&
				sshdr.sense_key == HARDWARE_ERROR &&
				sshdr.asc == 0x3e &&
				sshdr.ascq == 0x1) {
			pqi_take_device_offline(scmd->device);
			host_byte = DID_NO_CONNECT;
		}

		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
			sense_data_length = SCSI_SENSE_BUFFERSIZE;
		memcpy(scmd->sense_buffer, error_info->data,
			sense_data_length);
	}

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_aio_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	bool device_offline;

	scmd = io_request->scmd;
	error_info = io_request->error_info;
	host_byte = DID_OK;
	sense_data_length = 0;
	device_offline = false;

	switch (error_info->service_response) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		scsi_status = error_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch (error_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			scsi_status = SAM_STAT_TASK_ABORTED;
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			scsi_status = SAM_STAT_GOOD;
			residual_count = get_unaligned_le32(
						&error_info->residual_count);
			scsi_set_resid(scmd, residual_count);
			xfer_count = scsi_bufflen(scmd) - residual_count;
			if (xfer_count < scmd->underflow)
				host_byte = DID_SOFT_ERROR;
			break;
		case PQI_AIO_STATUS_OVERRUN:
			scsi_status = SAM_STAT_GOOD;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			scsi_status = SAM_STAT_GOOD;
			io_request->status = -EAGAIN;
			break;
		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
			device_offline = true;
			pqi_take_device_offline(scmd->device);
			host_byte = DID_NO_CONNECT;
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		default:
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		scsi_status = SAM_STAT_GOOD;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
	default:
		scsi_status = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (error_info->data_present) {
		sense_data_length =
			get_unaligned_le16(&error_info->data_length);
		if (sense_data_length) {
			if (sense_data_length > sizeof(error_info->data))
				sense_data_length = sizeof(error_info->data);
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
				sense_data_length = SCSI_SENSE_BUFFERSIZE;
			memcpy(scmd->sense_buffer, error_info->data,
				sense_data_length);
		}
	}

	if (device_offline && sense_data_length == 0)
		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
			0x3e, 0x1);

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
static void pqi_process_io_error(unsigned int iu_type,
	struct pqi_io_request *io_request)
{
	switch (iu_type) {
	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		pqi_process_raid_io_error(io_request);
		break;
	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
		pqi_process_aio_io_error(io_request);
		break;
	}
}
static int pqi_interpret_task_management_response(
	struct pqi_task_management_response *response)
{
	int rc;

	switch (response->response_code) {
	case SOP_TMF_COMPLETE:
	case SOP_TMF_FUNCTION_SUCCEEDED:
		rc = 0;
		break;
	default:
		rc = -EIO;
		break;
	}

	return rc;
}
static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group)
{
	unsigned int num_responses;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_io_request *io_request;
	struct pqi_io_response *response;
	u16 request_id;

	num_responses = 0;
	oq_ci = queue_group->oq_ci_copy;

	while (1) {
		oq_pi = *queue_group->oq_pi;
		if (oq_pi == oq_ci)
			break;

		num_responses++;
		response = queue_group->oq_element_array +
			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);

		request_id = get_unaligned_le16(&response->request_id);
		WARN_ON(request_id >= ctrl_info->max_io_slots);

		io_request = &ctrl_info->io_request_pool[request_id];
		WARN_ON(atomic_read(&io_request->refcount) == 0);

		switch (response->header.iu_type) {
		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
			break;
		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
			io_request->status =
				pqi_interpret_task_management_response(
					(void *)response);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			io_request->status = -EAGAIN;
			break;
		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
			io_request->error_info = ctrl_info->error_buffer +
				(get_unaligned_le16(&response->error_index) *
				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
			pqi_process_io_error(response->header.iu_type,
				io_request);
			break;
		default:
			dev_err(&ctrl_info->pci_dev->dev,
				"unexpected IU type: 0x%x\n",
				response->header.iu_type);
			WARN_ON(response->header.iu_type);
			break;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);

		/*
		 * Note that the I/O request structure CANNOT BE TOUCHED after
		 * returning from the I/O completion callback!
		 */

		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
	}

	if (num_responses) {
		queue_group->oq_ci_copy = oq_ci;
		writel(oq_ci, queue_group->oq_ci);
	}

	return num_responses;
}
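
/*
 * Compute the number of free elements in a circular queue from its
 * producer index (pi) and consumer index (ci).  One element is always
 * left unused so that a full queue (pi one behind ci) can be
 * distinguished from an empty one (pi == ci).  For example, with 8
 * elements, pi = 2 and ci = 6: 8 - 6 + 2 = 4 elements are in use, so
 * 8 - 4 - 1 = 3 are free.
 */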
static inline unsigned int pqi_num_elements_free(unsigned int pi,
	unsigned int ci, unsigned int elements_in_queue)
{
	unsigned int num_elements_used;

	if (pi >= ci)
		num_elements_used = pi - ci;
	else
		num_elements_used = elements_in_queue - ci + pi;

	return elements_in_queue - num_elements_used - 1;
}
#define PQI_EVENT_ACK_TIMEOUT	30

static void pqi_start_event_ack(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event_acknowledge_request *iu, size_t iu_length)
{
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	unsigned long flags;
	void *next_element;
	unsigned long timeout;
	struct pqi_queue_group *queue_group;

	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);

	timeout = (PQI_EVENT_ACK_TIMEOUT * HZ) + jiffies;

	while (1) {
		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);

		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
		iq_ci = *queue_group->iq_ci[RAID_PATH];

		if (pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		spin_unlock_irqrestore(
			&queue_group->submit_lock[RAID_PATH], flags);

		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"sending event acknowledge timed out\n");
			return;
		}
	}

	next_element = queue_group->iq_element_array[RAID_PATH] +
		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	memcpy(next_element, iu, iu_length);

	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;

	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);

	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
}
static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	struct pqi_event_acknowledge_request request;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	request.event_type = event->event_type;
	request.event_id = event->event_id;
	request.additional_event_id = event->additional_event_id;

	pqi_start_event_ack(ctrl_info, &request, sizeof(request));
}
static void pqi_event_worker(struct work_struct *work)
{
	unsigned int i;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_event *pending_event;
	bool got_non_heartbeat_event = false;

	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);

	pending_event = ctrl_info->pending_events;
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (pending_event->pending) {
			pending_event->pending = false;
			pqi_acknowledge_event(ctrl_info, pending_event);
			if (i != PQI_EVENT_HEARTBEAT)
				got_non_heartbeat_event = true;
		}
		pending_event++;
	}

	if (got_non_heartbeat_event)
		pqi_schedule_rescan_worker(ctrl_info);
}
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	unsigned long flags;
	struct pqi_io_request *io_request;
	struct pqi_io_request *next;
	struct scsi_cmnd *scmd;

	ctrl_info->controller_online = false;
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);

			list_for_each_entry_safe(io_request, next,
				&queue_group->request_list[path],
				request_list_entry) {

				scmd = io_request->scmd;
				if (scmd) {
					set_host_byte(scmd, DID_NO_CONNECT);
					pqi_scsi_done(scmd);
				}

				list_del(&io_request->request_list_entry);
			}

			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
		}
	}
}
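
/*
 * Heartbeat mechanism: every PQI_HEARTBEAT_TIMER_INTERVAL (5 seconds)
 * the timer below compares the interrupt count against the value it saw
 * on the previous tick.  If no interrupts arrived, a heartbeat event is
 * requested from the firmware; after more than PQI_MAX_HEARTBEAT_REQUESTS
 * consecutive silent intervals the controller is presumed dead and is
 * taken offline.
 */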
#define PQI_HEARTBEAT_TIMER_INTERVAL	(5 * HZ)
#define PQI_MAX_HEARTBEAT_REQUESTS	5

static void pqi_heartbeat_timer_handler(unsigned long data)
{
	int num_interrupts;
	struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;

	num_interrupts = atomic_read(&ctrl_info->num_interrupts);

	if (num_interrupts == ctrl_info->previous_num_interrupts) {
		ctrl_info->num_heartbeats_requested++;
		if (ctrl_info->num_heartbeats_requested >
			PQI_MAX_HEARTBEAT_REQUESTS) {
			pqi_take_ctrl_offline(ctrl_info);
			return;
		}
		ctrl_info->pending_events[PQI_EVENT_HEARTBEAT].pending = true;
		schedule_work(&ctrl_info->event_work);
	} else {
		ctrl_info->num_heartbeats_requested = 0;
	}

	ctrl_info->previous_num_interrupts = num_interrupts;
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}
static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->previous_num_interrupts =
		atomic_read(&ctrl_info->num_interrupts);

	init_timer(&ctrl_info->heartbeat_timer);
	ctrl_info->heartbeat_timer.expires =
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
	ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
	ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
	add_timer(&ctrl_info->heartbeat_timer);
	ctrl_info->heartbeat_timer_started = true;
}
static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->heartbeat_timer_started)
		del_timer_sync(&ctrl_info->heartbeat_timer);
}
static int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	switch (event_type) {
	case PQI_EVENT_TYPE_HEARTBEAT:
		index = PQI_EVENT_HEARTBEAT;
		break;
	case PQI_EVENT_TYPE_HOTPLUG:
		index = PQI_EVENT_HOTPLUG;
		break;
	case PQI_EVENT_TYPE_HARDWARE:
		index = PQI_EVENT_HARDWARE;
		break;
	case PQI_EVENT_TYPE_PHYSICAL_DEVICE:
		index = PQI_EVENT_PHYSICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_LOGICAL_DEVICE:
		index = PQI_EVENT_LOGICAL_DEVICE;
		break;
	case PQI_EVENT_TYPE_AIO_STATE_CHANGE:
		index = PQI_EVENT_AIO_STATE_CHANGE;
		break;
	case PQI_EVENT_TYPE_AIO_CONFIG_CHANGE:
		index = PQI_EVENT_AIO_CONFIG_CHANGE;
		break;
	default:
		index = -1;
		break;
	}

	return index;
}
static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int num_events;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_event_queue *event_queue;
	struct pqi_event_response *response;
	struct pqi_event *pending_event;
	bool need_delayed_work;
	int event_index;

	event_queue = &ctrl_info->event_queue;
	num_events = 0;
	need_delayed_work = false;
	oq_ci = event_queue->oq_ci_copy;

	while (1) {
		oq_pi = *event_queue->oq_pi;
		if (oq_pi == oq_ci)
			break;

		num_events++;
		response = event_queue->oq_element_array +
			(oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);

		event_index =
			pqi_event_type_to_event_index(response->event_type);

		if (event_index >= 0) {
			if (response->request_acknowlege) {
				pending_event =
					&ctrl_info->pending_events[event_index];
				pending_event->event_type =
					response->event_type;
				pending_event->event_id = response->event_id;
				pending_event->additional_event_id =
					response->additional_event_id;
				if (event_index != PQI_EVENT_HEARTBEAT) {
					pending_event->pending = true;
					need_delayed_work = true;
				}
			}
		}

		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
	}

	if (num_events) {
		event_queue->oq_ci_copy = oq_ci;
		writel(oq_ci, event_queue->oq_ci);

		if (need_delayed_work)
			schedule_work(&ctrl_info->event_work);
	}

	return num_events;
}
static irqreturn_t pqi_irq_handler(int irq, void *data)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;
	unsigned int num_responses_handled;

	queue_group = data;
	ctrl_info = queue_group->ctrl_info;

	if (!ctrl_info || !queue_group->oq_ci)
		return IRQ_NONE;

	num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);

	if (irq == ctrl_info->event_irq)
		num_responses_handled += pqi_process_event_intr(ctrl_info);

	if (num_responses_handled)
		atomic_inc(&ctrl_info->num_interrupts);

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);

	return IRQ_HANDLED;
}
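
/*
 * MSI-X vector 0 doubles as the event interrupt: pqi_irq_handler() above
 * processes the event queue in addition to the I/O queue whenever the
 * firing vector matches ctrl_info->event_irq.
 */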
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
	struct pci_dev *pdev = ctrl_info->pci_dev;
	int i;
	int rc;

	ctrl_info->event_irq = pci_irq_vector(pdev, 0);

	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), pqi_irq_handler, 0,
			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
		if (rc) {
			dev_err(&pdev->dev,
				"irq %u init failed with error %d\n",
				pci_irq_vector(pdev, i), rc);
			return rc;
		}
		ctrl_info->num_msix_vectors_initialized++;
	}

	return 0;
}
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	int ret;

	ret = pci_alloc_irq_vectors(ctrl_info->pci_dev,
			PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (ret < 0) {
		dev_err(&ctrl_info->pci_dev->dev,
			"MSI-X init failed with error %d\n", ret);
		return ret;
	}

	ctrl_info->num_msix_vectors_enabled = ret;

	return 0;
}
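
/*
 * All operational queue memory comes from a single DMA-coherent
 * allocation.  The function below sizes it in two passes: a first walk
 * with a NULL base pointer accumulates the worst-case length of every
 * element array and queue index, including alignment padding, and a
 * second walk carves the real allocation into the per-queue-group
 * pieces using the same PTR_ALIGN arithmetic.
 */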
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	size_t alloc_length;
	size_t element_array_length_per_iq;
	size_t element_array_length_per_oq;
	void *element_array;
	void *next_queue_index;
	void *aligned_pointer;
	unsigned int num_inbound_queues;
	unsigned int num_outbound_queues;
	unsigned int num_queue_indexes;
	struct pqi_queue_group *queue_group;

	element_array_length_per_iq =
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_iq;
	element_array_length_per_oq =
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_oq;
	num_inbound_queues = ctrl_info->num_queue_groups * 2;
	num_outbound_queues = ctrl_info->num_queue_groups;
	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;

	aligned_pointer = NULL;

	for (i = 0; i < num_inbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_iq;
	}

	for (i = 0; i < num_outbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_oq;
	}

	aligned_pointer = PTR_ALIGN(aligned_pointer,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	for (i = 0; i < num_queue_indexes; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		aligned_pointer += sizeof(pqi_index_t);
	}

	alloc_length = (size_t)aligned_pointer +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	ctrl_info->queue_memory_base =
		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
			alloc_length,
			&ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);

	if (!ctrl_info->queue_memory_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate memory for PQI operational queues\n");
		return -ENOMEM;
	}

	ctrl_info->queue_memory_length = alloc_length;

	element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_element_array[RAID_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		queue_group->iq_element_array[AIO_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->oq_element_array = element_array;
		queue_group->oq_element_array_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_oq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	ctrl_info->event_queue.oq_element_array = element_array;
	ctrl_info->event_queue.oq_element_array_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(element_array - ctrl_info->queue_memory_base);
	element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	next_queue_index = PTR_ALIGN(element_array,
		PQI_OPERATIONAL_INDEX_ALIGNMENT);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_ci[RAID_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->iq_ci[AIO_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->oq_pi = next_queue_index;
		queue_group->oq_pi_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index - ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
	}

	ctrl_info->event_queue.oq_pi = next_queue_index;
	ctrl_info->event_queue.oq_pi_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(next_queue_index - ctrl_info->queue_memory_base);

	return 0;
}
static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;

	/*
	 * Initialize the backpointers to the controller structure in
	 * each operational queue group structure.
	 */
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].ctrl_info = ctrl_info;

	/*
	 * Assign IDs to all operational queues.  Note that the IDs
	 * assigned to operational IQs are independent of the IDs
	 * assigned to operational OQs.
	 */
	ctrl_info->event_queue.oq_id = next_oq_id++;
	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
	}

	/*
	 * Assign MSI-X table entry indexes to all queues.  Note that the
	 * interrupt for the event queue is shared with the first queue group.
	 */
	ctrl_info->event_queue.int_msg_num = 0;
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].int_msg_num = i;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
	}
}
static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	size_t alloc_length;
	struct pqi_admin_queues_aligned *admin_queues_aligned;
	struct pqi_admin_queues *admin_queues;

	alloc_length = sizeof(struct pqi_admin_queues_aligned) +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	ctrl_info->admin_queue_memory_base =
		dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
			alloc_length,
			&ctrl_info->admin_queue_memory_base_dma_handle,
			GFP_KERNEL);

	if (!ctrl_info->admin_queue_memory_base)
		return -ENOMEM;

	ctrl_info->admin_queue_memory_length = alloc_length;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	admin_queues->iq_element_array =
		&admin_queues_aligned->iq_element_array;
	admin_queues->oq_element_array =
		&admin_queues_aligned->oq_element_array;
	admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
	admin_queues->oq_pi = &admin_queues_aligned->oq_pi;

	admin_queues->iq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->iq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->oq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->iq_ci_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void *)admin_queues->iq_ci -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_pi_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void *)admin_queues->oq_pi -
		ctrl_info->admin_queue_memory_base);

	return 0;
}
#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1
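
/*
 * Admin queue creation is a pure register-level handshake: the host
 * writes the element array and index addresses into the PQI device
 * registers, issues PQI_CREATE_ADMIN_QUEUE_PAIR through the
 * function-and-status-code register, and polls that register until the
 * firmware returns to IDLE or the one-second window expires.
 */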
static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	struct pqi_admin_queues *admin_queues;
	unsigned long timeout;
	u8 status;
	u32 reg;

	pqi_registers = ctrl_info->pqi_registers;
	admin_queues = &ctrl_info->admin_queues;

	writeq((u64)admin_queues->iq_element_array_bus_addr,
		&pqi_registers->admin_iq_element_array_addr);
	writeq((u64)admin_queues->oq_element_array_bus_addr,
		&pqi_registers->admin_oq_element_array_addr);
	writeq((u64)admin_queues->iq_ci_bus_addr,
		&pqi_registers->admin_iq_ci_addr);
	writeq((u64)admin_queues->oq_pi_bus_addr,
		&pqi_registers->admin_oq_pi_addr);

	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
		(PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
		(admin_queues->int_msg_num << 16);
	writel(reg, &pqi_registers->admin_iq_num_elements);
	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
		&pqi_registers->function_and_status_code);

	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
	}

	/*
	 * The offset registers are not initialized to the correct
	 * offsets until *after* the create admin queue pair command
	 * completes successfully.
	 */
	admin_queues->iq_pi = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_iq_pi_offset);
	admin_queues->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_oq_ci_offset);

	return 0;
}
static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request)
{
	struct pqi_admin_queues *admin_queues;
	void *next_element;
	pqi_index_t iq_pi;

	admin_queues = &ctrl_info->admin_queues;
	iq_pi = admin_queues->iq_pi_copy;

	next_element = admin_queues->iq_element_array +
		(iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);

	memcpy(next_element, request, sizeof(*request));

	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
	admin_queues->iq_pi_copy = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, admin_queues->iq_pi);
}
static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_response *response)
{
	struct pqi_admin_queues *admin_queues;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	unsigned long timeout;

	admin_queues = &ctrl_info->admin_queues;
	oq_ci = admin_queues->oq_ci_copy;

	timeout = (3 * HZ) + jiffies;

	while (1) {
		oq_pi = *admin_queues->oq_pi;
		if (oq_pi != oq_ci)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for admin response\n");
			return -ETIMEDOUT;
		}
		usleep_range(1000, 2000);
	}

	memcpy(response, admin_queues->oq_element_array +
		(oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));

	oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
	admin_queues->oq_ci_copy = oq_ci;
	writel(oq_ci, admin_queues->oq_ci);

	return 0;
}
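
/*
 * pqi_start_io() drains a queue group's submit list into the inbound
 * element array.  An IU may span several elements and may wrap past the
 * end of the circular array; in that case it is copied in two pieces.
 * For example, with 32 elements, iq_pi = 30 and an IU needing 4
 * elements, the first 2 elements' worth of bytes land at the end of the
 * array and the remainder at the start.
 */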
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request)
{
	struct pqi_io_request *next;
	void *next_element;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	size_t iu_length;
	unsigned long flags;
	unsigned int num_elements_needed;
	unsigned int num_elements_to_end_of_queue;
	size_t copy_count;
	struct pqi_iu_header *request;

	spin_lock_irqsave(&queue_group->submit_lock[path], flags);

	if (io_request)
		list_add_tail(&io_request->request_list_entry,
			&queue_group->request_list[path]);

	iq_pi = queue_group->iq_pi_copy[path];

	list_for_each_entry_safe(io_request, next,
		&queue_group->request_list[path], request_list_entry) {

		request = io_request->iu;

		iu_length = get_unaligned_le16(&request->iu_length) +
			PQI_REQUEST_HEADER_LENGTH;
		num_elements_needed =
			DIV_ROUND_UP(iu_length,
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		iq_ci = *queue_group->iq_ci[path];

		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		put_unaligned_le16(queue_group->oq_id,
			&request->response_queue_id);

		next_element = queue_group->iq_element_array[path] +
			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		num_elements_to_end_of_queue =
			ctrl_info->num_elements_per_iq - iq_pi;

		if (num_elements_needed <= num_elements_to_end_of_queue) {
			memcpy(next_element, request, iu_length);
		} else {
			copy_count = num_elements_to_end_of_queue *
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
			memcpy(next_element, request, copy_count);
			memcpy(queue_group->iq_element_array[path],
				(u8 *)request + copy_count,
				iu_length - copy_count);
		}

		iq_pi = (iq_pi + num_elements_needed) %
			ctrl_info->num_elements_per_iq;

		list_del(&io_request->request_list_entry);
	}

	if (iq_pi != queue_group->iq_pi_copy[path]) {
		queue_group->iq_pi_copy[path] = iq_pi;
		/*
		 * This write notifies the controller that one or more IUs are
		 * available to be processed.
		 */
		writel(iq_pi, queue_group->iq_pi[path]);
	}

	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}
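
/*
 * Synchronous RAID-path requests ride the normal I/O machinery: the
 * completion callback below simply fires an on-stack struct completion
 * that the submitting thread is sleeping on.
 */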
static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}
static int pqi_submit_raid_request_synchronous_with_io_request(
	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
	unsigned long timeout_msecs)
{
	int rc = 0;
	DECLARE_COMPLETION_ONSTACK(wait);

	io_request->io_complete_callback = pqi_raid_synchronous_complete;
	io_request->context = &wait;

	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_for_completion_io(&wait);
	} else {
		if (!wait_for_completion_io_timeout(&wait,
			msecs_to_jiffies(timeout_msecs))) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"command timed out\n");
			rc = -ETIMEDOUT;
		}
	}

	return rc;
}
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc = 0;
	struct pqi_io_request *io_request;
	unsigned long start_jiffies;
	unsigned long msecs_blocked;
	size_t iu_length;

	/*
	 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
	 * are mutually exclusive.
	 */

	if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
		if (down_interruptible(&ctrl_info->sync_request_sem))
			return -ERESTARTSYS;
	} else {
		if (timeout_msecs == NO_TIMEOUT) {
			down(&ctrl_info->sync_request_sem);
		} else {
			start_jiffies = jiffies;
			if (down_timeout(&ctrl_info->sync_request_sem,
				msecs_to_jiffies(timeout_msecs)))
				return -ETIMEDOUT;
			msecs_blocked =
				jiffies_to_msecs(jiffies - start_jiffies);
			if (msecs_blocked >= timeout_msecs)
				return -ETIMEDOUT;
			timeout_msecs -= msecs_blocked;
		}
	}

	io_request = pqi_alloc_io_request(ctrl_info);

	put_unaligned_le16(io_request->index,
		&(((struct pqi_raid_path_request *)request)->request_id));

	if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
		((struct pqi_raid_path_request *)request)->error_index =
			((struct pqi_raid_path_request *)request)->request_id;

	iu_length = get_unaligned_le16(&request->iu_length) +
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(io_request->iu, request, iu_length);

	rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
		io_request, timeout_msecs);

	if (error_info) {
		if (io_request->error_info)
			memcpy(error_info, io_request->error_info,
				sizeof(*error_info));
		else
			memset(error_info, 0, sizeof(*error_info));
	} else if (rc == 0 && io_request->error_info) {
		u8 scsi_status;
		struct pqi_raid_error_info *raid_error_info;

		raid_error_info = io_request->error_info;
		scsi_status = raid_error_info->status;

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			raid_error_info->data_out_result ==
			PQI_DATA_IN_OUT_UNDERFLOW)
			scsi_status = SAM_STAT_GOOD;

		if (scsi_status != SAM_STAT_GOOD)
			rc = -EIO;
	}

	pqi_free_io_request(io_request);

	up(&ctrl_info->sync_request_sem);

	return rc;
}
static int pqi_validate_admin_response(
	struct pqi_general_admin_response *response, u8 expected_function_code)
{
	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
		return -EINVAL;

	if (get_unaligned_le16(&response->header.iu_length) !=
		PQI_GENERAL_ADMIN_IU_LENGTH)
		return -EINVAL;

	if (response->function_code != expected_function_code)
		return -EINVAL;

	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}
static int pqi_submit_admin_request_synchronous(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request,
	struct pqi_general_admin_response *response)
{
	int rc;

	pqi_submit_admin_request(ctrl_info, request);

	rc = pqi_poll_for_admin_response(ctrl_info, response);

	if (rc == 0)
		rc = pqi_validate_admin_response(response,
			request->function_code);

	return rc;
}
static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;
	struct pqi_device_capability *capability;
	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;

	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
	if (!capability)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code =
		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
	put_unaligned_le32(sizeof(*capability),
		&request.data.report_device_capability.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor,
		capability, sizeof(*capability),
		PCI_DMA_FROMDEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);

	pqi_pci_unmap(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor, 1,
		PCI_DMA_FROMDEVICE);

	if (rc)
		goto out;

	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
		rc = -EIO;
		goto out;
	}

	ctrl_info->max_inbound_queues =
		get_unaligned_le16(&capability->max_inbound_queues);
	ctrl_info->max_elements_per_iq =
		get_unaligned_le16(&capability->max_elements_per_iq);
	ctrl_info->max_iq_element_length =
		get_unaligned_le16(&capability->max_iq_element_length)
		* 16;
	ctrl_info->max_outbound_queues =
		get_unaligned_le16(&capability->max_outbound_queues);
	ctrl_info->max_elements_per_oq =
		get_unaligned_le16(&capability->max_elements_per_oq);
	ctrl_info->max_oq_element_length =
		get_unaligned_le16(&capability->max_oq_element_length)
		* 16;

	sop_iu_layer_descriptor =
		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];

	ctrl_info->max_inbound_iu_length_per_firmware =
		get_unaligned_le16(
			&sop_iu_layer_descriptor->max_inbound_iu_length);
	ctrl_info->inbound_spanning_supported =
		sop_iu_layer_descriptor->inbound_spanning_supported;
	ctrl_info->outbound_spanning_supported =
		sop_iu_layer_descriptor->outbound_spanning_supported;

out:
	kfree(capability);

	return rc;
}
static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->max_iq_element_length <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_iq_element_length,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_oq_element_length <
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. outbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_oq_element_length,
			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_inbound_iu_length_per_firmware <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound IU length of %u is less than the min. required length of %d\n",
			ctrl_info->max_inbound_iu_length_per_firmware,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (!ctrl_info->inbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller does not support inbound spanning\n");
		return -EINVAL;
	}

	if (ctrl_info->outbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller supports outbound spanning but this driver does not\n");
		return -EINVAL;
	}

	return 0;
}
static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
	bool inbound_queue, u16 queue_id)
{
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	if (inbound_queue)
		request.function_code =
			PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
	else
		request.function_code =
			PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
	put_unaligned_le16(queue_id,
		&request.data.delete_operational_queue.queue_id);

	return pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
}
static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_event_queue *event_queue;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	event_queue = &ctrl_info->event_queue;

	/*
	 * Create OQ (Outbound Queue - device to host queue) to dedicate
	 * to events.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(event_queue->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(event_queue->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc)
		return rc;

	event_queue->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}
static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;
	struct pqi_queue_group *queue_group;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	i = ctrl_info->num_active_queue_groups;
	queue_group = &ctrl_info->queue_groups[i];

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * RAID path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound RAID queue\n");
		return rc;
	}

	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * Advanced I/O (AIO) path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64((u64)queue_group->
		iq_element_array_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound AIO queue\n");
		goto delete_inbound_queue_raid;
	}

	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Designate the 2nd IQ as the AIO path.  By default, all IQs are
	 * assumed to be for RAID path I/O unless we change the queue's
	 * property.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.change_operational_iq_properties.queue_id);
	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
		&request.data.change_operational_iq_properties.vendor_specific);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error changing queue property\n");
		goto delete_inbound_queue_aio;
	}

	/*
	 * Create OQ (Outbound Queue - device to host queue).
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(queue_group->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_oq,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(queue_group->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating outbound queue\n");
		goto delete_inbound_queue_aio;
	}

	queue_group->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	ctrl_info->num_active_queue_groups++;

	return 0;

delete_inbound_queue_aio:
	pqi_delete_operational_queue(ctrl_info, true,
		queue_group->iq_id[AIO_PATH]);

delete_inbound_queue_raid:
	pqi_delete_operational_queue(ctrl_info, true,
		queue_group->iq_id[RAID_PATH]);

	return rc;
}
static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;

	rc = pqi_create_event_queue(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating event queue\n");
		return rc;
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		rc = pqi_create_queue_group(ctrl_info);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error creating queue group number %u/%u\n",
				i, ctrl_info->num_queue_groups);
			return rc;
		}
	}

	return 0;
}
#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
	(offsetof(struct pqi_event_config, descriptors) + \
	(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
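
/*
 * Event configuration is a read-modify-write exchange: the current
 * event configuration is fetched with a REPORT request, every
 * descriptor's oq_id is pointed at the dedicated event queue, and the
 * modified table is pushed back with a SET request.
 */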
static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;
	struct pqi_event_config *event_config;
	struct pqi_general_management_request request;

	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		GFP_KERNEL);
	if (!event_config)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		PCI_DMA_FROMDEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		PCI_DMA_FROMDEVICE);

	if (rc)
		goto out;

	for (i = 0; i < event_config->num_event_descriptors; i++)
		put_unaligned_le16(ctrl_info->event_queue.oq_id,
			&event_config->descriptors[i].oq_id);

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		PCI_DMA_TODEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		PCI_DMA_TODEVICE);

out:
	kfree(event_config);

	return rc;
}
static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	size_t sg_chain_buffer_length;
	struct pqi_io_request *io_request;

	if (!ctrl_info->io_request_pool)
		return;

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		kfree(io_request->iu);
		if (!io_request->sg_chain_buffer)
			break;
		dma_free_coherent(dev, sg_chain_buffer_length,
			io_request->sg_chain_buffer,
			io_request->sg_chain_buffer_dma_handle);
		io_request++;
	}

	kfree(ctrl_info->io_request_pool);
	ctrl_info->io_request_pool = NULL;
}
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
		ctrl_info->error_buffer_length,
		&ctrl_info->error_buffer_dma_handle, GFP_KERNEL);

	if (!ctrl_info->error_buffer)
		return -ENOMEM;

	return 0;
}
static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	void *sg_chain_buffer;
	size_t sg_chain_buffer_length;
	dma_addr_t sg_chain_buffer_dma_handle;
	struct device *dev;
	struct pqi_io_request *io_request;

	ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
		sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);

	if (!ctrl_info->io_request_pool) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate I/O request pool\n");
		goto error;
	}

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request->iu =
			kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);

		if (!io_request->iu) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate IU buffers\n");
			goto error;
		}

		sg_chain_buffer = dma_alloc_coherent(dev,
			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
			GFP_KERNEL);

		if (!sg_chain_buffer) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate PQI scatter-gather chain buffers\n");
			goto error;
		}

		io_request->index = i;
		io_request->sg_chain_buffer = sg_chain_buffer;
		io_request->sg_chain_buffer_dma_handle =
			sg_chain_buffer_dma_handle;
		io_request++;
	}

	return 0;

error:
	pqi_free_all_io_requests(ctrl_info);

	return -ENOMEM;
}
/*
 * Calculate required resources that are sized based on max. outstanding
 * requests and max. transfer size.
 */
static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	u32 max_transfer_size;
	u32 max_sg_entries;

	ctrl_info->scsi_ml_can_queue =
		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;

	ctrl_info->error_buffer_length =
		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;

	max_transfer_size =
		min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);

	max_sg_entries = max_transfer_size / PAGE_SIZE;

	/* +1 to cover when the buffer is not page-aligned. */
	max_sg_entries++;

	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);

	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;

	ctrl_info->sg_chain_buffer_length =
		max_sg_entries * sizeof(struct pqi_sg_descriptor);
	ctrl_info->sg_tablesize = max_sg_entries;
	ctrl_info->max_sectors = max_transfer_size / 512;
}
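
/*
 * A worked example of the sizing above, assuming 4 KiB pages: if
 * max_sg_entries ends up as 65, the usable transfer size is
 * (65 - 1) * 4096 = 262144 bytes, i.e. max_sectors = 512.  The "- 1"
 * mirrors the "+ 1" added earlier to cover a non-page-aligned buffer.
 */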
static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
	int num_cpus;
	int max_queue_groups;
	int num_queue_groups;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;

	max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
		ctrl_info->max_outbound_queues - 1);
	max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

	num_cpus = num_online_cpus();
	num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
	num_queue_groups = min(num_queue_groups, max_queue_groups);

	ctrl_info->num_queue_groups = num_queue_groups;

	/*
	 * Make sure that the max. inbound IU length is an even multiple
	 * of our inbound element length.
	 */
	ctrl_info->max_inbound_iu_length =
		(ctrl_info->max_inbound_iu_length_per_firmware /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;

	num_elements_per_iq =
		(ctrl_info->max_inbound_iu_length /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	/* Add one because one element in each queue is unusable. */
	num_elements_per_iq++;

	num_elements_per_iq = min(num_elements_per_iq,
		ctrl_info->max_elements_per_iq);

	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
	num_elements_per_oq = min(num_elements_per_oq,
		ctrl_info->max_elements_per_oq);

	ctrl_info->num_elements_per_iq = num_elements_per_iq;
	ctrl_info->num_elements_per_oq = num_elements_per_oq;

	ctrl_info->max_sg_per_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
}
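
/*
 * Scatter-gather lists that do not fit in the request IU are chained:
 * the last embedded descriptor slot is rewritten as a CISS_SG_CHAIN
 * pointer to the request's pre-allocated chain buffer, and the
 * remaining descriptors are written there.  The final descriptor,
 * embedded or chained, is flagged CISS_SG_LAST.
 */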
static inline void pqi_set_sg_descriptor(
	struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
{
	u64 address = (u64)sg_dma_address(sg);
	unsigned int length = sg_dma_len(sg);

	put_unaligned_le64(address, &sg_descriptor->address);
	put_unaligned_le32(length, &sg_descriptor->length);
	put_unaligned_le32(0, &sg_descriptor->flags);
}
static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);

	return 0;
}
static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}
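
/*
 * pqi_build_aio_sg_list() is deliberately parallel to
 * pqi_build_raid_sg_list(); the one difference is that the AIO IU also
 * carries an explicit num_sg_descriptors count, whereas the RAID path
 * encodes the descriptor count only implicitly via the IU length.
 */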
static void pqi_raid_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	scsi_dma_unmap(scmd);
	pqi_scsi_done(scmd);
}
static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	size_t cdb_length;
	struct pqi_io_request *io_request;
	struct pqi_raid_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_raid_io_complete;
	io_request->scmd = scmd;

	scmd->host_scribble = (unsigned char *)io_request;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));

	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
	memcpy(request->cdb, scmd->cmnd, cdb_length);

	switch (cdb_length) {
	case 6:
	case 10:
	case 12:
	case 16:
		/* No bytes in the Additional CDB bytes field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		/* 4 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		/* 8 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		/* 12 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default:
		/* 16 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_16;
		break;
	}

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		WARN_ON(scmd->sc_data_direction);
		break;
	}

	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

	return 0;
}
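
/*
 * The SOP data-direction flags above are named from the controller's
 * point of view: a host write (DMA_TO_DEVICE) becomes SOP_READ_FLAG
 * (the controller reads host memory) and a host read (DMA_FROM_DEVICE)
 * becomes SOP_WRITE_FLAG.
 */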
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	scsi_dma_unmap(scmd);
	if (io_request->status == -EAGAIN)
		set_host_byte(scmd, DID_IMM_RETRY);
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
		scmd->cmnd, scmd->cmd_len, queue_group, NULL);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;

	scmd->host_scribble = (unsigned char *)io_request;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
	put_unaligned_le32(aio_handle, &request->nexus_id);
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	if (cdb_length > sizeof(request->cdb))
		cdb_length = sizeof(request->cdb);
	request->cdb_length = cdb_length;
	memcpy(request->cdb, cdb, cdb_length);

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		WARN_ON(scmd->sc_data_direction);
		break;
	}

	if (encryption_info) {
		request->encryption_enable = true;
		put_unaligned_le16(encryption_info->data_encryption_key_index,
			&request->data_encryption_key_index);
		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
			&request->encrypt_tweak_lower);
		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}
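
/*
 * When a pqi_encryption_info is supplied (the RAID bypass path passes
 * one; pqi_aio_submit_scsi_cmd() above passes NULL), the data encryption
 * key index and the two 32-bit halves of the encryption tweak are copied
 * into the AIO IU so the controller can encrypt the data in-line.
 */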
static int pqi_scsi_queue_command(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;
	u16 hwq;
	struct pqi_queue_group *queue_group;
	bool raid_bypassed;

	device = scmd->device->hostdata;
	ctrl_info = shost_to_hba(shost);

	if (pqi_ctrl_offline(ctrl_info)) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	/*
	 * This is necessary because the SML doesn't zero out this field during
	 * error recovery.
	 */
	scmd->result = 0;

	hwq = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
	if (hwq >= ctrl_info->num_queue_groups)
		hwq = 0;

	queue_group = &ctrl_info->queue_groups[hwq];

	if (pqi_is_logical_device(device)) {
		raid_bypassed = false;
		if (device->offload_enabled &&
			!blk_rq_is_passthrough(scmd->request)) {
			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
				scmd, queue_group);
			if (rc == 0 ||
				rc == SCSI_MLQUEUE_HOST_BUSY ||
				rc == SAM_STAT_CHECK_CONDITION ||
				rc == SAM_STAT_RESERVATION_CONFLICT)
				raid_bypassed = true;
		}
		if (!raid_bypassed)
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	} else {
		if (device->aio_enabled)
			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
		else
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	}

	return rc;
}
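
/*
 * The bypass logic above is deliberately conservative: a command counts
 * as "bypassed" only when the AIO attempt succeeded or failed in a way
 * the midlayer itself will handle (host busy, check condition,
 * reservation conflict); any other outcome falls back to the normal
 * RAID path.
 */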
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}
#define PQI_LUN_RESET_TIMEOUT_SECS	10

static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct completion *wait)
{
	int rc;
	unsigned int wait_secs = 0;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
			rc = 0;
			break;
		}

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ETIMEDOUT;
			break;
		}

		wait_secs += PQI_LUN_RESET_TIMEOUT_SECS;

		dev_err(&ctrl_info->pci_dev->dev,
			"resetting scsi %d:%d:%d:%d - waiting %u seconds\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun, wait_secs);
	}

	return rc;
}
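
/*
 * The wait loop above never gives up on its own: it re-arms the
 * 10-second timeout indefinitely, logging progress each cycle, and only
 * returns early when the completion fires or the health check finds the
 * controller offline.
 */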
static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	struct pqi_io_request *io_request;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct pqi_task_management_request *request;

	down(&ctrl_info->lun_reset_sem);

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_lun_reset_complete;
	io_request->context = &wait;

	request = io_request->iu;
	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le16(io_request->index, &request->request_id);
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));
	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;

	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
	if (rc == 0)
		rc = io_request->status;

	pqi_free_io_request(io_request);
	up(&ctrl_info->lun_reset_sem);

	return rc;
}
/* Performs a reset at the LUN level. */

static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		return FAILED;

	rc = pqi_lun_reset(ctrl_info, device);

	return rc == 0 ? SUCCESS : FAILED;
}
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	ctrl_info = shost_to_hba(scmd->device->host);
	device = scmd->device->hostdata;

	dev_err(&ctrl_info->pci_dev->dev,
		"resetting scsi %d:%d:%d:%d\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun);

	rc = pqi_device_reset(ctrl_info, device);

	dev_err(&ctrl_info->pci_dev->dev,
		"reset of scsi %d:%d:%d:%d: %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun,
		rc == SUCCESS ? "SUCCESS" : "FAILED");

	return rc;
}
static int pqi_slave_alloc(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;
	unsigned long flags;
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_target *starget;
	struct sas_rphy *rphy;

	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
		if (device) {
			device->target = sdev_id(sdev);
			device->lun = sdev->lun;
			device->target_lun_valid = true;
		}
	} else {
		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
			sdev_id(sdev), sdev->lun);
	}

	if (device && device->expose_device) {
		sdev->hostdata = device;
		device->sdev = sdev;
		if (device->queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(sdev,
				device->advertised_queue_depth);
		}
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 0;
}
static int pqi_slave_configure(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	if (!device->expose_device)
		sdev->no_uld_attach = true;

	return 0;
}
static int pqi_map_queues(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
}
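
/*
 * blk_mq_pci_map_queues() assigns block-layer hardware contexts
 * according to the PCI device's MSI-X IRQ affinity, so the hwq derived
 * from the request tag in pqi_scsi_queue_command() selects the queue
 * group whose interrupt is bound to the submitting CPU.
 */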
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
	void __user *arg)
{
	struct pci_dev *pci_dev;
	u32 subsystem_vendor;
	u32 subsystem_device;
	cciss_pci_info_struct pciinfo;

	if (!arg)
		return -EINVAL;

	pci_dev = ctrl_info->pci_dev;

	pciinfo.domain = pci_domain_nr(pci_dev->bus);
	pciinfo.bus = pci_dev->bus->number;
	pciinfo.dev_fn = pci_dev->devfn;
	subsystem_vendor = pci_dev->subsystem_vendor;
	subsystem_device = pci_dev->subsystem_device;
	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
		subsystem_vendor;

	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;

	return 0;
}
static int pqi_getdrivver_ioctl(void __user *arg)
{
	u32 version;

	if (!arg)
		return -EINVAL;

	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
		(DRIVER_RELEASE << 16) | DRIVER_REVISION;

	if (copy_to_user(arg, &version, sizeof(version)))
		return -EFAULT;

	return 0;
}
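
/*
 * Version packing example: with DRIVER_MAJOR 0, DRIVER_MINOR 9,
 * DRIVER_RELEASE 13, and DRIVER_REVISION 370, the value handed back to
 * the legacy cciss ioctl is (0 << 28) | (9 << 24) | (13 << 16) | 370 =
 * 0x090d0172.
 */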
struct ciss_error_info {
	u8	scsi_status;
	int	command_status;
	size_t	sense_data_length;
};
static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
	struct ciss_error_info *ciss_error_info)
{
	int ciss_cmd_status;
	size_t sense_data_length;

	switch (pqi_error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
		break;
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
		break;
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
		break;
	default:
		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
		break;
	}

	sense_data_length =
		get_unaligned_le16(&pqi_error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&pqi_error_info->response_data_length);
	if (sense_data_length)
		if (sense_data_length > sizeof(pqi_error_info->data))
			sense_data_length = sizeof(pqi_error_info->data);

	ciss_error_info->scsi_status = pqi_error_info->status;
	ciss_error_info->command_status = ciss_cmd_status;
	ciss_error_info->sense_data_length = sense_data_length;
}
static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	int rc;
	char *kernel_buffer = NULL;
	u16 iu_length;
	size_t sense_data_length;
	IOCTL_Command_struct iocommand;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info pqi_error_info;
	struct ciss_error_info ciss_error_info;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;
	if (!arg)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
		return -EFAULT;
	if (iocommand.buf_size < 1 &&
		iocommand.Request.Type.Direction != XFER_NONE)
		return -EINVAL;
	if (iocommand.Request.CDBLen > sizeof(request.cdb))
		return -EINVAL;
	if (iocommand.Request.Type.Type != TYPE_CMD)
		return -EINVAL;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
	case XFER_WRITE:
	case XFER_READ:
		break;
	default:
		return -EINVAL;
	}

	if (iocommand.buf_size > 0) {
		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (!kernel_buffer)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(kernel_buffer, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			memset(kernel_buffer, 0, iocommand.buf_size);
		}
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
		request.data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case XFER_WRITE:
		request.data_direction = SOP_WRITE_FLAG;
		break;
	case XFER_READ:
		request.data_direction = SOP_READ_FLAG;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;

	if (iocommand.buf_size > 0) {
		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);

		rc = pqi_map_single(ctrl_info->pci_dev,
			&request.sg_descriptors[0], kernel_buffer,
			iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
		if (rc)
			goto out;

		iu_length += sizeof(request.sg_descriptors[0]);
	}

	put_unaligned_le16(iu_length, &request.header.iu_length);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);

	if (iocommand.buf_size > 0)
		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
			PCI_DMA_BIDIRECTIONAL);

	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));

	if (rc == 0) {
		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
		iocommand.error_info.CommandStatus =
			ciss_error_info.command_status;
		sense_data_length = ciss_error_info.sense_data_length;
		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand.error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand.error_info.SenseInfo);
			memcpy(iocommand.error_info.SenseInfo,
				pqi_error_info.data, sense_data_length);
			iocommand.error_info.SenseLen = sense_data_length;
		}
	}

	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}

	if (rc == 0 && iocommand.buf_size > 0 &&
		(iocommand.Request.Type.Direction & XFER_READ)) {
		if (copy_to_user(iocommand.buf, kernel_buffer,
			iocommand.buf_size)) {
			rc = -EFAULT;
		}
	}

out:
	kfree(kernel_buffer);

	return rc;
}
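
/*
 * The CCISS_PASSTHRU path reuses the normal RAID IU: the user buffer is
 * bounced through a kernel allocation, mapped as a single SG descriptor,
 * and submitted synchronously with NO_TIMEOUT; any controller error info
 * is translated back into the legacy cciss error_info layout before the
 * structure is copied out to user space.
 */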
static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		rc = pqi_scan_scsi_devices(ctrl_info);
		break;
	case CCISS_GETPCIINFO:
		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
		break;
	case CCISS_GETDRIVVER:
		rc = pqi_getdrivver_ioctl(arg);
		break;
	case CCISS_PASSTHRU:
		rc = pqi_passthru_ioctl(ctrl_info, arg);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
static ssize_t pqi_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	ssize_t count = 0;
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	count += snprintf(buffer + count, PAGE_SIZE - count,
		"  driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);

	count += snprintf(buffer + count, PAGE_SIZE - count,
		"firmware: %s\n", ctrl_info->firmware_version);

	return count;
}
static ssize_t pqi_host_rescan_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	pqi_scan_start(shost);

	return count;
}
static DEVICE_ATTR(version, S_IRUGO, pqi_version_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, pqi_host_rescan_store);

static struct device_attribute *pqi_shost_attrs[] = {
	&dev_attr_version,
	&dev_attr_rescan,
	NULL
};
static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (pqi_is_logical_device(device)) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}
	sas_address = device->sas_address;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	buffer[0] = device->offload_enabled ? '1' : '0';
	buffer[1] = '\n';
	buffer[2] = '\0';

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 2;
}
static DEVICE_ATTR(sas_address, S_IRUGO, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, S_IRUGO,
	pqi_ssd_smart_path_enabled_show, NULL);

static struct device_attribute *pqi_sdev_attrs[] = {
	&dev_attr_sas_address,
	&dev_attr_ssd_smart_path_enabled,
	NULL
};
static struct scsi_host_template pqi_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME_SHORT,
	.proc_name = DRIVER_NAME_SHORT,
	.queuecommand = pqi_scsi_queue_command,
	.scan_start = pqi_scan_start,
	.scan_finished = pqi_scan_finished,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_device_reset_handler = pqi_eh_device_reset_handler,
	.ioctl = pqi_ioctl,
	.slave_alloc = pqi_slave_alloc,
	.slave_configure = pqi_slave_configure,
	.map_queues = pqi_map_queues,
	.sdev_attrs = pqi_sdev_attrs,
	.shost_attrs = pqi_shost_attrs,
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
	if (!shost) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_host_alloc failed for controller %u\n",
			ctrl_info->ctrl_id);
		return -ENOMEM;
	}

	shost->io_port = 0;
	shost->n_io_port = 0;
	shost->this_id = -1;
	shost->max_channel = PQI_MAX_BUS;
	shost->max_cmd_len = MAX_COMMAND_SIZE;
	shost->max_lun = ~0;
	shost->max_id = ~0;
	shost->max_sectors = ctrl_info->max_sectors;
	shost->can_queue = ctrl_info->scsi_ml_can_queue;
	shost->cmd_per_lun = shost->can_queue;
	shost->sg_tablesize = ctrl_info->sg_tablesize;
	shost->transportt = pqi_sas_transport_template;
	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
	shost->unique_id = shost->irq;
	shost->nr_hw_queues = ctrl_info->num_queue_groups;
	shost->hostdata[0] = (unsigned long)ctrl_info;

	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_add_host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto free_host;
	}

	rc = pqi_add_sas_host(shost, ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"add SAS host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto remove_host;
	}

	ctrl_info->scsi_host = shost;

	return 0;

remove_host:
	scsi_remove_host(shost);
free_host:
	scsi_host_put(shost);

	return rc;
}
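
/*
 * can_queue is set to ctrl_info->scsi_ml_can_queue, which (as computed
 * during init) is roughly the usable inbound-queue elements minus the
 * slots reserved for synchronous requests and LUN resets, so the SCSI
 * midlayer can never queue more commands than there are io_request
 * slots.
 */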
static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;

	pqi_delete_sas_host(ctrl_info);

	shost = ctrl_info->scsi_host;
	if (!shost)
		return;

	scsi_remove_host(shost);
	scsi_host_put(shost);
}
#define PQI_RESET_ACTION_RESET		0x1

#define PQI_RESET_TYPE_NO_RESET		0x0
#define PQI_RESET_TYPE_SOFT_RESET	0x1
#define PQI_RESET_TYPE_FIRM_RESET	0x2
#define PQI_RESET_TYPE_HARD_RESET	0x3

static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u32 reset_params;

	reset_params = (PQI_RESET_ACTION_RESET << 5) |
		PQI_RESET_TYPE_HARD_RESET;

	writel(reset_params,
		&ctrl_info->pqi_registers->device_reset);

	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"PQI reset failed\n");

	return rc;
}
static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_identify_controller *identify;

	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
	if (!identify)
		return -ENOMEM;

	rc = pqi_identify_controller(ctrl_info, identify);
	if (rc)
		goto out;

	memcpy(ctrl_info->firmware_version, identify->firmware_version,
		sizeof(identify->firmware_version));
	ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
	snprintf(ctrl_info->firmware_version +
		strlen(ctrl_info->firmware_version),
		sizeof(ctrl_info->firmware_version),
		"-%u", get_unaligned_le16(&identify->firmware_build_number));

out:
	kfree(identify);

	return rc;
}
static int pqi_kdump_init(struct pqi_ctrl_info *ctrl_info)
{
	if (!sis_is_firmware_running(ctrl_info))
		return -ENXIO;

	if (pqi_get_ctrl_mode(ctrl_info) == PQI_MODE) {
		sis_disable_msix(ctrl_info);
		if (pqi_reset(ctrl_info) == 0)
			sis_reenable_sis_mode(ctrl_info);
	}

	return 0;
}
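
/*
 * In a kdump kernel the controller may still be in PQI mode from the
 * crashed kernel, so pqi_kdump_init() knocks it back into SIS mode with
 * a hard reset; pqi_ctrl_init() can then start from the same known
 * state as a fresh boot.
 */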
static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (reset_devices) {
		rc = pqi_kdump_init(ctrl_info);
		if (rc)
			return rc;
	}

	/*
	 * When the controller comes out of reset, it is always running
	 * in legacy SIS mode. This is so that it can be compatible
	 * with legacy drivers shipped with OSes. So we have to talk
	 * to it using SIS commands at first. Once we are satisified
	 * that the controller supports PQI, we transition it into PQI
	 * mode.
	 */

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing SIS interface\n");
		return rc;
	}

	/*
	 * Get the controller properties. This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
		ctrl_info->max_outstanding_requests =
			PQI_MAX_OUTSTANDING_REQUESTS;

	pqi_calculate_io_resources(ctrl_info);

	rc = pqi_alloc_error_buffer(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate PQI error buffer\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	rc = pqi_alloc_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error allocating admin queues\n");
		return rc;
	}

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_report_device_capability(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"obtaining device capability failed\n");
		return rc;
	}

	rc = pqi_validate_device_capability(ctrl_info);
	if (rc)
		return rc;

	pqi_calculate_queue_resources(ctrl_info);

	rc = pqi_enable_msix_interrupts(ctrl_info);
	if (rc)
		return rc;

	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
		ctrl_info->max_msix_vectors =
			ctrl_info->num_msix_vectors_enabled;
		pqi_calculate_queue_resources(ctrl_info);
	}

	rc = pqi_alloc_io_resources(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_alloc_operational_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_init_operational_queues(ctrl_info);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	sis_enable_msix(ctrl_info);

	rc = pqi_configure_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error configuring events\n");
		return rc;
	}

	pqi_start_heartbeat_timer(ctrl_info);

	ctrl_info->controller_online = true;

	/* Register with the SCSI subsystem. */
	rc = pqi_register_scsi(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_get_ctrl_firmware_version(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining firmware version\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	pqi_schedule_update_time_worker(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
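
/*
 * Note the ordering in pqi_ctrl_init(): MSI-X vectors are allocated
 * after the first pqi_calculate_queue_resources() pass, and if fewer
 * vectors were granted than queue groups were planned, the calculation
 * is simply re-run with max_msix_vectors clamped to what was actually
 * enabled.
 */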
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u64 mask;

	rc = pci_enable_device(ctrl_info->pci_dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable PCI device\n");
		return rc;
	}

	if (sizeof(dma_addr_t) > 4)
		mask = DMA_BIT_MASK(64);
	else
		mask = DMA_BIT_MASK(32);

	rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
		goto disable_device;
	}

	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to obtain PCI resources\n");
		goto disable_device;
	}

	ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
		ctrl_info->pci_dev, 0),
		sizeof(struct pqi_ctrl_registers));
	if (!ctrl_info->iomem_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to map memory for controller registers\n");
		rc = -ENOMEM;
		goto release_regions;
	}

	ctrl_info->registers = ctrl_info->iomem_base;
	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;

	/* Enable bus mastering. */
	pci_set_master(ctrl_info->pci_dev);

	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);

	return 0;

release_regions:
	pci_release_regions(ctrl_info->pci_dev);
disable_device:
	pci_disable_device(ctrl_info->pci_dev);

	return rc;
}
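
/*
 * Only sizeof(struct pqi_ctrl_registers) bytes of BAR 0 are mapped
 * above: the driver touches nothing beyond the defined SIS/PQI register
 * block, so mapping the whole BAR is unnecessary.
 */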
static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	iounmap(ctrl_info->iomem_base);
	pci_release_regions(ctrl_info->pci_dev);
	pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
}
static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
		GFP_KERNEL, numa_node);
	if (!ctrl_info)
		return NULL;

	mutex_init(&ctrl_info->scan_mutex);

	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
	spin_lock_init(&ctrl_info->scsi_device_list_lock);

	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
	atomic_set(&ctrl_info->num_interrupts, 0);

	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);

	sema_init(&ctrl_info->sync_request_sem,
		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
	sema_init(&ctrl_info->lun_reset_sem, PQI_RESERVED_IO_SLOTS_LUN_RESET);

	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

	return ctrl_info;
}
static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}
static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	int i;

	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++) {
		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
			&ctrl_info->queue_groups[i]);
	}

	pci_free_irq_vectors(ctrl_info->pci_dev);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	if (ctrl_info->queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
	if (ctrl_info->iomem_base)
		pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
	pqi_remove_all_scsi_devices(ctrl_info);
	pqi_unregister_scsi(ctrl_info);

	if (ctrl_info->pqi_mode_enabled) {
		sis_disable_msix(ctrl_info);
		if (pqi_reset(ctrl_info) == 0)
			sis_reenable_sis_mode(ctrl_info);
	}
	pqi_free_ctrl_resources(ctrl_info);
}
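
/*
 * Teardown mirrors pqi_kdump_init(): if the controller was left in PQI
 * mode it is reset back into SIS mode on the way out, so a subsequent
 * driver load (or a legacy driver) finds it in the expected state.
 */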
static void pqi_print_ctrl_info(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data) {
		ctrl_description = (char *)id->driver_data;
	} else {
		switch (id->subvendor) {
		case PCI_VENDOR_ID_HP:
			ctrl_description = hpe_branded_controller;
			break;
		case PCI_VENDOR_ID_ADAPTEC2:
		default:
			ctrl_description = microsemi_branded_controller;
			break;
		}
	}

	dev_info(&pdev->dev, "%s found\n", ctrl_description);
}
static int pqi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;
	int node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pdev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pdev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pdev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pdev->dev);
	if (node == NUMA_NO_NODE)
		set_dev_node(&pdev->dev, 0);

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pdev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pdev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}
static void pqi_pci_remove(struct pci_dev *pdev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pdev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}
static void pqi_shutdown(struct pci_dev *pdev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pdev);
	if (!ctrl_info)
		goto error;

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info);
	if (rc == 0)
		return;

error:
	dev_warn(&pdev->dev,
		"unable to flush controller cache\n");
}
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0110)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0600)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0601)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0602)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0603)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0650)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0651)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0652)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0653)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0654)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0655)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0700)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x0701)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0800)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0801)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0802)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0803)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0804)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0805)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0900)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0901)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0902)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0903)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0904)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0905)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_ADAPTEC2, 0x0906)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1001)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1100)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1101)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1102)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_VENDOR_ID_HP, 0x1150)
	},
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
			       PCI_ANY_ID, PCI_ANY_ID)
	},
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
};
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");

	pqi_sas_transport_template =
		sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
static void __attribute__((unused)) verify_structures(void)
{
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_host_to_ctrl_doorbell) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_interrupt_mask) != 0x34);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell) != 0x9c);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_ctrl_to_host_doorbell_clear) != 0xa0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_driver_scratch) != 0xb0);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_firmware_status) != 0xbc);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		sis_mailbox) != 0x1000);
	BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
		pqi_registers) != 0x4000);

	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_type) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		iu_length) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		response_queue_id) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_iu_header,
		work_area) != 0x6);
	BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);

	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		status) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		service_response) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_present) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved) != 0x3);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		residual_count) != 0x4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		reserved1) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
		data) != 0xc);
	BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);

	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_result) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_result) != 0x1);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		reserved) != 0x2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status) != 0x5);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		status_qualifier) != 0x6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		sense_data_length) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		response_data_length) != 0xa);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_in_transferred) != 0xc);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data_out_transferred) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
		data) != 0x14);
	BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);

	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		signature) != 0x0);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		function_and_status_code) != 0x8);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_iq_elements) != 0x10);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_admin_oq_elements) != 0x11);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_length) != 0x12);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_length) != 0x13);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		max_reset_timeout) != 0x14);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_status) != 0x18);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_set) != 0x1c);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		legacy_intx_mask_clear) != 0x20);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_status) != 0x40);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_pi_offset) != 0x48);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_ci_offset) != 0x50);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_element_array_addr) != 0x58);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_element_array_addr) != 0x60);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_ci_addr) != 0x68);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_pi_addr) != 0x70);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_iq_num_elements) != 0x78);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_oq_num_elements) != 0x79);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		admin_queue_int_msg_num) != 0x7a);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_error) != 0x80);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		error_details) != 0x88);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		device_reset) != 0x90);
	BUILD_BUG_ON(offsetof(struct pqi_device_registers,
		power_action) != 0x94);
	BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);

	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);

	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);

	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);

	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);

	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);

	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);

	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
}