// SPDX-License-Identifier: GPL-2.0
/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 *    Copyright (c) 2016-2018 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    Questions/Comments/Bugfixes to storagedev@microchip.com
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>

#include "smartpqi.h"
#include "smartpqi_sis.h"
#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"1.2.16-012"
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		2
#define DRIVER_RELEASE		16
#define DRIVER_REVISION		12

#define DRIVER_NAME		"Microsemi PQI Driver (v" \
				DRIVER_VERSION BUILD_TIMESTAMP ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))
MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static void pqi_ctrl_offline_worker(struct work_struct *work);
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass);
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info);
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested);
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info);
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info);
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs);
/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);
enum pqi_lockup_action {
	NONE,
	REBOOT,
	PANIC
};

static enum pqi_lockup_action pqi_lockup_action = NONE;

static struct {
	enum pqi_lockup_action	action;
	char			*name;
} pqi_lockup_actions[] = {
	{
		.action = NONE,
		.name = "none",
	},
	{
		.action = REBOOT,
		.name = "reboot",
	},
	{
		.action = PANIC,
		.name = "panic",
	},
};
static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_OFA,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};
static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static int pqi_disable_heartbeat;
module_param_named(disable_heartbeat,
	pqi_disable_heartbeat, int, 0644);
MODULE_PARM_DESC(disable_heartbeat,
	"Disable heartbeat.");

static int pqi_disable_ctrl_shutdown;
module_param_named(disable_ctrl_shutdown,
	pqi_disable_ctrl_shutdown, int, 0644);
MODULE_PARM_DESC(disable_ctrl_shutdown,
	"Disable controller shutdown when controller locked up.");

static char *pqi_lockup_action_param;
module_param_named(lockup_action,
	pqi_lockup_action_param, charp, 0644);
MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n"
	"\t\tSupported: none, reboot, panic\n"
	"\t\tDefault: none");

static int pqi_expose_ld_first;
module_param_named(expose_ld_first,
	pqi_expose_ld_first, int, 0644);
MODULE_PARM_DESC(expose_ld_first,
	"Expose logical drives before physical drives.");

static int pqi_hide_vsep;
module_param_named(hide_vsep,
	pqi_hide_vsep, int, 0644);
MODULE_PARM_DESC(hide_vsep,
	"Hide the virtual SEP for direct attached drives.");
static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "RAID UNKNOWN";
}
#define SA_RAID_0		0	/* also used for RAID 00 */
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff
static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}
static inline void pqi_disable_write_same(struct scsi_device *sdev)
{
	sdev->no_write_same = 1;
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_is_external_raid_addr(u8 *scsi3addr)
{
	return scsi3addr[2] != 0;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_device_reset = true;
}

static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_device_reset;
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	pqi_retry_raid_bypass_requests(ctrl_info);
	scsi_unblock_requests(ctrl_info->scsi_host);
}
static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}
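/*
 * Note on the accounting above (descriptive comment, not upstream text):
 * a thread that has to sleep because the controller is blocked parks
 * itself in num_blocked_threads, so quiescing only needs to wait until
 * the busy-thread count drops to the blocked-thread count, i.e. no
 * thread is actively touching the controller. With 3 busy threads of
 * which 2 are blocked waiting, the loop keeps polling until the last
 * active thread either finishes or blocks as well.
 */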
static inline bool pqi_device_offline(struct pqi_scsi_dev *device)
{
	return device->device_offline;
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_ctrl_ofa_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = true;
}

static inline void pqi_ctrl_ofa_done(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_ofa = false;
}

static inline bool pqi_ctrl_in_ofa(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_ofa;
}

static inline void pqi_device_remove_start(struct pqi_scsi_dev *device)
{
	device->in_remove = true;
}

static inline bool pqi_device_in_remove(struct pqi_scsi_dev *device)
{
	return device->in_remove;
}

static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->in_shutdown = true;
}

static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->in_shutdown;
}
static inline void pqi_schedule_rescan_worker_with_delay(
	struct pqi_ctrl_info *ctrl_info, unsigned long delay)
{
	if (pqi_ctrl_offline(ctrl_info))
		return;
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	schedule_delayed_work(&ctrl_info->rescan_work, delay);
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, 0);
}

#define PQI_RESCAN_WORK_DELAY	(10 * PQI_HZ)

static inline void pqi_schedule_rescan_worker_delayed(
	struct pqi_ctrl_info *ctrl_info)
{
	pqi_schedule_rescan_worker_with_delay(ctrl_info, PQI_RESCAN_WORK_DELAY);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_work_sync(&ctrl_info->event_work);
}
static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->soft_reset_status)
		return 0;

	return readb(ctrl_info->soft_reset_status);
}

static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info,
	u8 clear)
{
	u8 status;

	if (!ctrl_info->soft_reset_status)
		return;

	status = pqi_read_soft_reset_status(ctrl_info);
	status &= ~clear;
	writeb(status, ctrl_info->soft_reset_status);
}
static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, enum dma_data_direction data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
		return 0;

	bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
		data_direction);
	if (dma_mapping_error(&pci_dev->dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}
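/*
 * pqi_map_single() builds a single-element scatter/gather list: the
 * whole buffer is DMA-mapped as one contiguous region and CISS_SG_LAST
 * marks the descriptor as the end of the chain, so callers that need
 * only one data buffer (all of the synchronous RAID path requests
 * below) never populate more than sg_descriptors[0].
 */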
static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	enum dma_data_direction data_direction)
{
	int i;

	if (data_direction == DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		dma_unmap_single(&pci_dev->dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, enum dma_data_direction *dir)
{
	u8 *cdb;
	size_t cdb_length = buffer_length;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)cdb_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER;
		else
			cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(cdb_length, &cdb[6]);
		break;
	case SA_FLUSH_CACHE:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_FLUSH_CACHE;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SENSE_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
	case BMIC_SENSE_SUBSYSTEM_INFORMATION:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_SET_DIAG_OPTIONS:
		cdb_length = 0;
		fallthrough;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	case BMIC_CSMI_PASSTHRU:
		request->data_direction = SOP_BIDIRECTIONAL;
		cdb[0] = BMIC_WRITE;
		cdb[5] = CSMI_CC_SAS_SMP_PASSTHRU;
		cdb[6] = cmd;
		put_unaligned_be16(cdb_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		*dir = DMA_FROM_DEVICE;
		break;
	case SOP_WRITE_FLAG:
		*dir = DMA_TO_DEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		*dir = DMA_NONE;
		break;
	default:
		*dir = DMA_BIDIRECTIONAL;
		break;
	}

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, *dir);
}
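/*
 * Example of the CDB encoding above: an INQUIRY for VPD page 0xc3 with
 * a 64-byte buffer yields cdb[0] = INQUIRY, cdb[1] = 0x1 (EVPD),
 * cdb[2] = 0xc3, cdb[4] = 64 and data_direction = SOP_READ_FLAG, which
 * the second switch then translates to DMA_FROM_DEVICE before the
 * buffer is mapped into sg_descriptors[0].
 */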
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
{
	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;
	io_request->raid_bypass = false;
}

static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	pqi_reinit_io_request(io_request);

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}
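/*
 * The io_request pool is claimed lock-free: the atomic refcount both
 * reserves a slot (the winner sees the count go 0 -> 1) and tracks
 * users of it. next_io_request_slot is only a scan hint, which is why
 * reading and rewriting it without a lock is "benignly racy" - at
 * worst two threads start their scan at the same slot, and the
 * atomic_inc_return() arbitrates who actually gets it.
 */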
static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
{
	int rc;
	struct pqi_raid_path_request request;
	enum dma_data_direction dir;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, scsi3addr, buffer,
		buffer_length, vpd_page, &dir);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		error_info, timeout_msecs);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}

/* helper functions for pqi_send_scsi_raid_request */

static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, NULL, NO_TIMEOUT);
}

static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info,
	u8 cmd, void *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID,
		buffer, buffer_length, 0, error_info, NO_TIMEOUT);
}

static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER,
		buffer, sizeof(*buffer));
}

static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info,
	struct bmic_sense_subsystem_info *sense_info)
{
	return pqi_send_ctrl_raid_request(ctrl_info,
		BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info,
		sizeof(*sense_info));
}

static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	return pqi_send_scsi_raid_request(ctrl_info, INQUIRY, scsi3addr,
		buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT);
}
static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer, size_t buffer_length)
{
	int rc;
	enum dma_data_direction dir;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &dir);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);

	return rc;
}
static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
	enum bmic_flush_cache_shutdown_event shutdown_event)
{
	int rc;
	struct bmic_flush_cache *flush_cache;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	flush_cache = kzalloc(sizeof(*flush_cache), GFP_KERNEL);
	if (!flush_cache)
		return -ENOMEM;

	flush_cache->shutdown_event = shutdown_event;

	rc = pqi_send_ctrl_raid_request(ctrl_info, SA_FLUSH_CACHE, flush_cache,
		sizeof(*flush_cache));

	kfree(flush_cache);

	return rc;
}
int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info,
	struct bmic_csmi_smp_passthru_buffer *buffer, size_t buffer_length,
	struct pqi_raid_error_info *error_info)
{
	return pqi_send_ctrl_raid_with_error(ctrl_info, BMIC_CSMI_PASSTHRU,
		buffer, buffer_length, error_info);
}
#define PQI_FETCH_PTRAID_DATA		(1 << 31)

static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_diag_options *diag;

	diag = kzalloc(sizeof(*diag), GFP_KERNEL);
	if (!diag)
		return -ENOMEM;

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS,
		diag, sizeof(*diag));
	if (rc)
		goto out;

	diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA);

	rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag,
		sizeof(*diag));

out:
	kfree(diag);

	return rc;
}
static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS,
		buffer, buffer_length);
}
#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}
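/*
 * The wellness time payload is BCD-encoded, one field per byte:
 * time[0..2] = hour/minute/second, time[3] is unused, time[4..5] =
 * month/day, and the year is split into century and year-within-century
 * (e.g. 2020 is stored as 0x20, 0x20). bin2bcd(23) == 0x23, so the
 * firmware can render the clock without any binary conversion.
 */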
#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * PQI_HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	if (pqi_ctrl_offline(ctrl_info))
		return;

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->update_time_work, 0);
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->update_time_work);
}
static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer,
		buffer_length);
}

static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}
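/*
 * pqi_report_phys_logical_luns() issues the report LUNs command twice:
 * first with a header-sized buffer just to learn list_length, then
 * with a buffer sized for the whole list. Because devices can be
 * hot-added between the two calls, the returned list_length is checked
 * again and the data buffer is reallocated and refetched (the "again"
 * loop) whenever the list grew.
 */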
static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}
static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;
	int bus;
	int target;
	int lun;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		if (device->is_external_raid_device) {
			bus = PQI_EXTERNAL_RAID_VOLUME_BUS;
			target = (lunid >> 16) & 0x3fff;
			lun = lunid & 0xff;
		} else {
			bus = PQI_RAID_VOLUME_BUS;
			target = 0;
			lun = lunid & 0x3fff;
		}
		pqi_set_bus_target_lun(device, bus, target, lun);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}
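/*
 * Address decoding used above: the first 4 bytes of the CISS LUN
 * address are read as a little-endian lunid. Internal logical volumes
 * take only the low 14 bits (lunid & 0x3fff) as the LUN on the fixed
 * RAID volume bus; external RAID volumes additionally carve a target
 * id out of bits 16-29. Physical devices keep 0/0 until the SAS
 * transport layer assigns real values.
 */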
static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"logical device %08x%08x %s\n",
		*((u32 *)&device->scsi3addr),
		*((u32 *)&device->scsi3addr[4]), err_msg);

	return -EINVAL;
}
static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u32 raid_map_size;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
		device->scsi3addr, raid_map, sizeof(*raid_map),
		0, NULL, NO_TIMEOUT);
	if (rc)
		goto error;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size > sizeof(*raid_map)) {

		kfree(raid_map);

		raid_map = kmalloc(raid_map_size, GFP_KERNEL);
		if (!raid_map)
			return -ENOMEM;

		rc = pqi_send_scsi_raid_request(ctrl_info, CISS_GET_RAID_MAP,
			device->scsi3addr, raid_map, raid_map_size,
			0, NULL, NO_TIMEOUT);
		if (rc)
			goto error;

		if (get_unaligned_le32(&raid_map->structure_size)
			!= raid_map_size) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"Requested %d bytes, received %d bytes",
				raid_map_size,
				get_unaligned_le32(&raid_map->structure_size));
			goto error;
		}
	}

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}
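/*
 * The RAID map is fetched with the same double-read pattern as the LUN
 * lists: a probe using sizeof(struct raid_map) discovers
 * structure_size, and if the map (which ends in a variable-length
 * disk_data array) is larger, it is refetched into a buffer of exactly
 * that size. A size mismatch on the second read means the map changed
 * underneath us, so the whole attempt is abandoned.
 */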
static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 bypass_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_BYPASS_STATUS, buffer, 64);
	if (rc)
		goto out;

#define RAID_BYPASS_STATUS		4
#define RAID_BYPASS_CONFIGURED		0x1
#define RAID_BYPASS_ENABLED		0x2

	bypass_status = buffer[RAID_BYPASS_STATUS];
	device->raid_bypass_configured =
		(bypass_status & RAID_BYPASS_CONFIGURED) != 0;
	if (device->raid_bypass_configured &&
		(bypass_status & RAID_BYPASS_ENABLED) &&
		pqi_get_raid_map(ctrl_info, device) == 0)
		device->raid_bypass_enabled = true;

out:
	kfree(buffer);
}
/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	if (vpd->page_code != CISS_VPD_LV_STATUS)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}
static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return rc;
	}

	scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
	scsi_sanitize_inquiry_string(&id_phys->model[8], 16);

	memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
	memcpy(device->model, &id_phys->model[8], sizeof(device->model));

	device->box_index = id_phys->box_index;
	device->phys_box_on_bus = id_phys->phys_box_on_bus;
	device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;

	return 0;
}
static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (device->devtype == TYPE_DISK) {
		if (device->is_external_raid_device) {
			device->raid_level = SA_RAID_UNKNOWN;
			device->volume_status = CISS_LV_OK;
			device->volume_offline = false;
		} else {
			pqi_get_raid_level(ctrl_info, device);
			pqi_get_raid_bypass_status(ctrl_info, device);
			pqi_get_volume_status(ctrl_info, device);
		}
	}

out:
	kfree(buffer);

	return rc;
}
static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	if (device->is_expander_smp_device)
		return 0;

	if (pqi_is_logical_device(device))
		rc = pqi_get_logical_device_info(ctrl_info, device);
	else
		rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);

	return rc;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}
static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}
static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}
#define PQI_PENDING_IO_TIMEOUT_SECS	20

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	pqi_device_remove_start(device);

	rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
			ctrl_info->scsi_host->host_no, device->bus,
			device->target, device->lun,
			atomic_read(&device->scsi_cmds_outstanding));

	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}
/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		if (device->bus == bus && device->target == target && device->lun == lun)
			return device;

	return NULL;
}
static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}
enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}
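/*
 * Three-way match semantics: DEVICE_SAME means the SCSI address and
 * identity (WWID or volume id) both match, so the existing entry is
 * just refreshed; DEVICE_CHANGED means the address is reused by a
 * different device (or the volume went offline) and the entry must be
 * replaced; DEVICE_NOT_FOUND means the address is new altogether.
 */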
static inline const char *pqi_device_type(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return "Enclosure SMP    ";

	return scsi_device_type(device->devtype);
}
#define PQI_DEV_INFO_BUFFER_LENGTH	128

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	ssize_t count;
	char buffer[PQI_DEV_INFO_BUFFER_LENGTH];

	count = snprintf(buffer, PQI_DEV_INFO_BUFFER_LENGTH,
		"%d:%d:", ctrl_info->scsi_host->host_no, device->bus);

	if (device->target_lun_valid)
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"%d:%d",
			device->target,
			device->lun);
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"-:-");

	if (pqi_is_logical_device(device))
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %08x%08x",
			*((u32 *)&device->scsi3addr),
			*((u32 *)&device->scsi3addr[4]));
	else
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			" %016llx", device->sas_address);

	count += scnprintf(buffer + count, PQI_DEV_INFO_BUFFER_LENGTH - count,
		" %s %.8s %.16s ",
		pqi_device_type(device),
		device->vendor,
		device->model);

	if (pqi_is_logical_device(device)) {
		if (device->devtype == TYPE_DISK)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				"SSDSmartPathCap%c En%c %-12s",
				device->raid_bypass_configured ? '+' : '-',
				device->raid_bypass_enabled ? '+' : '-',
				pqi_raid_level_to_string(device->raid_level));
	} else {
		count += scnprintf(buffer + count,
			PQI_DEV_INFO_BUFFER_LENGTH - count,
			"AIO%c", device->aio_enabled ? '+' : '-');
		if (device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC)
			count += scnprintf(buffer + count,
				PQI_DEV_INFO_BUFFER_LENGTH - count,
				" qd=%-6d", device->queue_depth);
	}

	dev_info(&ctrl_info->pci_dev->dev, "%s %s\n", action, buffer);
}
/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
		existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
		new_device->volume_status == CISS_LV_OK)
		existing_device->rescan = true;

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->is_external_raid_device =
		new_device->is_external_raid_device;
	existing_device->is_expander_smp_device =
		new_device->is_expander_smp_device;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	existing_device->box_index = new_device->box_index;
	existing_device->phys_box_on_bus = new_device->phys_box_on_bus;
	existing_device->phy_connected_dev_type =
		new_device->phy_connected_dev_type;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;
	existing_device->raid_bypass_configured =
		new_device->raid_bypass_configured;
	existing_device->raid_bypass_enabled =
		new_device->raid_bypass_enabled;
	existing_device->device_offline = false;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}
static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}
/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

static inline bool pqi_is_device_added(struct pqi_scsi_dev *device)
{
	if (device->is_expander_smp_device)
		return device->sas_port != NULL;

	return device->sdev != NULL;
}
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	LIST_HEAD(add_list);
	LIST_HEAD(delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del_init(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (pqi_ctrl_in_ofa(ctrl_info))
		pqi_ctrl_ofa_done(ctrl_info);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		}
		list_del(&device->delete_list_entry);
		if (pqi_is_device_added(device)) {
			pqi_remove_device(ctrl_info, device);
		} else {
			if (!device->volume_offline)
				pqi_dev_info(ctrl_info, "removed", device);
			pqi_free_device(device);
		}
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev) {
			if (device->queue_depth !=
				device->advertised_queue_depth) {
				device->advertised_queue_depth = device->queue_depth;
				scsi_change_queue_depth(device->sdev,
					device->advertised_queue_depth);
			}
			if (device->rescan) {
				scsi_rescan_device(&device->sdev->sdev_gendev);
				device->rescan = false;
			}
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (!pqi_is_device_added(device)) {
			rc = pqi_add_device(ctrl_info, device);
			if (rc == 0) {
				pqi_dev_info(ctrl_info, "added", device);
			} else {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
			}
		}
	}
}
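/*
 * Summary of the two-phase update above: under the spinlock the code
 * only marks entries (device_gone/new_device) and splices them onto
 * the local delete_list/add_list; every operation that can sleep or
 * call back into the SCSI midlayer - scsi_remove_device(),
 * scsi_add_device(), queue-depth changes, rescans - happens after the
 * lock is dropped, driven from those private lists.
 */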
static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	/*
	 * Only support the HBA controller itself as a RAID
	 * controller.  If it's a RAID controller other than
	 * the HBA itself (an external RAID controller, for
	 * example), we don't support it.
	 */
	if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
	    !pqi_is_hba_lunid(device->scsi3addr))
		return false;

	return true;
}

static inline bool pqi_skip_device(u8 *scsi3addr)
{
	/* Ignore all masked devices. */
	if (MASKED_DEVICE(scsi3addr))
		return true;

	return false;
}

static inline void pqi_mask_device(u8 *scsi3addr)
{
	scsi3addr[3] |= 0xc0;
}

static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
	switch (device->device_type) {
	case SA_DEVICE_TYPE_SAS:
	case SA_DEVICE_TYPE_EXPANDER_SMP:
	case SA_DEVICE_TYPE_SES:
		return true;
	}

	return false;
}

static inline bool pqi_expose_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device ||
		!pqi_skip_device(device->scsi3addr);
}
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	LIST_HEAD(new_device_list_head);
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	unsigned int physical_index;
	unsigned int logical_index;
	static char *out_of_memory_msg =
		"failed to allocate memory, device discovery stopped";

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}

		if (pqi_hide_vsep) {
			for (i = num_physicals - 1; i >= 0; i--) {
				phys_lun_ext_entry =
					&physdev_list->lun_entries[i];
				if (CISS_GET_DRIVE_NUMBER(
					phys_lun_ext_entry->lunid) ==
						PQI_VSEP_CISS_BTL) {
					pqi_mask_device(
						phys_lun_ext_entry->lunid);
					break;
				}
			}
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc_array(num_new_devices,
					sizeof(*new_device_list),
					GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;
	physical_index = 0;
	logical_index = 0;

	for (i = 0; i < num_new_devices; i++) {

		if ((!pqi_expose_ld_first && i < num_physicals) ||
			(pqi_expose_ld_first && i >= num_logicals)) {
			is_physical_device = true;
			phys_lun_ext_entry =
				&physdev_list->lun_entries[physical_index++];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[logical_index++];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device && pqi_skip_device(scsi3addr))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		if (is_physical_device) {
			device->device_type = phys_lun_ext_entry->device_type;
			if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
				device->is_expander_smp_device = true;
		} else {
			device->is_external_raid_device =
				pqi_is_external_raid_addr(scsi3addr);
		}

		if (!pqi_is_supported_device(device))
			continue;

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device, id_phys);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			if (device->is_physical_device)
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping physical device %016llx\n",
					get_unaligned_be64(
						&phys_lun_ext_entry->wwid));
			else
				dev_warn(&ctrl_info->pci_dev->dev,
					"obtaining device info failed, skipping logical device %08x%08x\n",
					*((u32 *)&device->scsi3addr),
					*((u32 *)&device->scsi3addr[4]));
			rc = 0;
			continue;
		}

		pqi_assign_bus_target_lun(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			if ((phys_lun_ext_entry->device_flags &
				CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle) {
				device->aio_enabled = true;
				device->aio_handle =
					phys_lun_ext_entry->aio_handle;
			}
		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		if (pqi_is_device_with_sas_address(device))
			device->sas_address = get_unaligned_be64(&device->wwid);

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	if (!mutex_trylock(&ctrl_info->scan_mutex)) {
		pqi_schedule_rescan_worker_delayed(ctrl_info);
		rc = -EINPROGRESS;
	} else {
		rc = pqi_update_scsi_devices(ctrl_info);
		if (rc)
			pqi_schedule_rescan_worker_delayed(ctrl_info);
		mutex_unlock(&ctrl_info->scan_mutex);
	}

	return rc;
}
static void pqi_scan_start(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(shost);
	if (pqi_ctrl_in_ofa(ctrl_info))
		return;

	pqi_scan_scsi_devices(ctrl_info);
}

/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
	unsigned long elapsed_time)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_priv(shost);

	return !mutex_is_locked(&ctrl_info->scan_mutex);
}

static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->scan_mutex);
	mutex_unlock(&ctrl_info->scan_mutex);
}

static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->lun_reset_mutex);
	mutex_unlock(&ctrl_info->lun_reset_mutex);
}

static void pqi_wait_until_ofa_finished(struct pqi_ctrl_info *ctrl_info)
{
	mutex_lock(&ctrl_info->ofa_mutex);
	mutex_unlock(&ctrl_info->ofa_mutex);
}
static inline void pqi_set_encryption_info(
	struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
	u64 first_block)
{
	u32 volume_blk_size;

	/*
	 * Set the encryption tweak values based on logical block address.
	 * If the block size is 512, the tweak value is equal to the LBA.
	 * For other block sizes, tweak value is (LBA * block size) / 512.
	 */
	volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
	if (volume_blk_size != 512)
		first_block = (first_block * volume_blk_size) / 512;

	encryption_info->data_encryption_key_index =
		get_unaligned_le16(&raid_map->data_encryption_key_index);
	encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
	encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
}
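/*
 * Illustrative example of the tweak computation above (hypothetical
 * values, not from any controller): with a 4096-byte volume block size
 * and LBA 100, tweak = (100 * 4096) / 512 = 800, so
 * encrypt_tweak_lower = 800 and encrypt_tweak_upper = 0.  With a
 * 512-byte block size the tweak is simply the LBA itself.
 */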
/*
 * Attempt to perform RAID bypass mapping for a logical volume I/O.
 */

#define PQI_RAID_BYPASS_INELIGIBLE	1

static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct raid_map *raid_map;
	bool is_write = false;
	u32 map_index;
	u64 first_block;
	u64 last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row;
	u64 last_row;
	u32 first_row_offset;
	u32 last_row_offset;
	u32 first_column;
	u32 last_column;
	u64 r0_first_row;
	u64 r0_last_row;
	u32 r5or6_blocks_per_row;
	u64 r5or6_first_row;
	u64 r5or6_last_row;
	u32 r5or6_first_row_offset;
	u32 r5or6_last_row_offset;
	u32 r5or6_first_column;
	u32 r5or6_last_column;
	u16 data_disks_per_row;
	u32 total_disks_per_row;
	u16 layout_map_count;
	u32 stripesize;
	u16 strip_size;
	u32 first_group;
	u32 last_group;
	u32 current_group;
	u32 map_row;
	u32 aio_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_length;
	int offload_to_mirror;
	struct pqi_encryption_info *encryption_info_ptr;
	struct pqi_encryption_info encryption_info;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	/* Check for valid opcode, get LBA and block count. */
	switch (scmd->cmnd[0]) {
	case WRITE_6:
		is_write = true;
		/* fall through */
	case READ_6:
		first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
			(scmd->cmnd[2] << 8) | scmd->cmnd[3]);
		block_cnt = (u32)scmd->cmnd[4];
		if (block_cnt == 0)
			block_cnt = 256;
		break;
	case WRITE_10:
		is_write = true;
		/* fall through */
	case READ_10:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case WRITE_12:
		is_write = true;
		/* fall through */
	case READ_12:
		first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	case WRITE_16:
		is_write = true;
		/* fall through */
	case READ_16:
		first_block = get_unaligned_be64(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
		break;
	default:
		/* Process via normal I/O path. */
		return PQI_RAID_BYPASS_INELIGIBLE;
	}

	/* Check for write to non-RAID-0. */
	if (is_write && device->raid_level != SA_RAID_0)
		return PQI_RAID_BYPASS_INELIGIBLE;

	if (unlikely(block_cnt == 0))
		return PQI_RAID_BYPASS_INELIGIBLE;

	last_block = first_block + block_cnt - 1;
	raid_map = device->raid_map;

	/* Check for invalid block or wraparound. */
	if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
		last_block < first_block)
		return PQI_RAID_BYPASS_INELIGIBLE;

	data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
	strip_size = get_unaligned_le16(&raid_map->strip_size);
	layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);

	/* Calculate stripe information for the request. */
	blocks_per_row = data_disks_per_row * strip_size;
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	do_div(tmpdiv, strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	do_div(tmpdiv, strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
	last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / strip_size;
	last_column = last_row_offset / strip_size;
#endif

	/* If this isn't a single row/column then give to the controller. */
	if (first_row != last_row || first_column != last_column)
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Proceeding with driver mapping. */
	total_disks_per_row = data_disks_per_row +
		get_unaligned_le16(&raid_map->metadata_disks_per_row);
	map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
		get_unaligned_le16(&raid_map->row_cnt);
	map_index = (map_row * total_disks_per_row) + first_column;

	/* RAID 1 */
	if (device->raid_level == SA_RAID_1) {
		if (device->offload_to_mirror)
			map_index += data_disks_per_row;
		device->offload_to_mirror = !device->offload_to_mirror;
	} else if (device->raid_level == SA_RAID_ADM) {
		/* RAID ADM */
		/*
		 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
		 * divisible by 3.
		 */
		offload_to_mirror = device->offload_to_mirror;
		if (offload_to_mirror == 0)  {
			/* use physical disk in the first mirrored group. */
			map_index %= data_disks_per_row;
		} else {
			do {
				/*
				 * Determine mirror group that map_index
				 * indicates.
				 */
				current_group = map_index / data_disks_per_row;

				if (offload_to_mirror != current_group) {
					if (current_group <
						layout_map_count - 1) {
						/*
						 * Select raid index from
						 * next group.
						 */
						map_index += data_disks_per_row;
						current_group++;
					} else {
						/*
						 * Select raid index from first
						 * group.
						 */
						map_index %= data_disks_per_row;
						current_group = 0;
					}
				}
			} while (offload_to_mirror != current_group);
		}

		/* Set mirror group to use next time. */
		offload_to_mirror =
			(offload_to_mirror >= layout_map_count - 1) ?
				0 : offload_to_mirror + 1;
		device->offload_to_mirror = offload_to_mirror;
		/*
		 * Avoid direct use of device->offload_to_mirror within this
		 * function since multiple threads might simultaneously
		 * increment it beyond the range of device->layout_map_count -1.
		 */
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) && layout_map_count > 1) {
		/* RAID 50/60 */
		/* Verify first and last block are in same RAID group */
		r5or6_blocks_per_row = strip_size * data_disks_per_row;
		stripesize = r5or6_blocks_per_row * layout_map_count;
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_group = do_div(tmpdiv, stripesize);
		tmpdiv = first_group;
		do_div(tmpdiv, r5or6_blocks_per_row);
		first_group = tmpdiv;
		tmpdiv = last_block;
		last_group = do_div(tmpdiv, stripesize);
		tmpdiv = last_group;
		do_div(tmpdiv, r5or6_blocks_per_row);
		last_group = tmpdiv;
#else
		first_group = (first_block % stripesize) / r5or6_blocks_per_row;
		last_group = (last_block % stripesize) / r5or6_blocks_per_row;
#endif
		if (first_group != last_group)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Verify request is in a single row of RAID 5/6 */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		do_div(tmpdiv, stripesize);
		first_row = r5or6_first_row = r0_first_row = tmpdiv;
		tmpdiv = last_block;
		do_div(tmpdiv, stripesize);
		r5or6_last_row = r0_last_row = tmpdiv;
#else
		first_row = r5or6_first_row = r0_first_row =
			first_block / stripesize;
		r5or6_last_row = r0_last_row = last_block / stripesize;
#endif
		if (r5or6_first_row != r5or6_last_row)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Verify request is in a single column */
#if BITS_PER_LONG == 32
		tmpdiv = first_block;
		first_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = first_row_offset;
		first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
		r5or6_first_row_offset = first_row_offset;
		tmpdiv = last_block;
		r5or6_last_row_offset = do_div(tmpdiv, stripesize);
		tmpdiv = r5or6_last_row_offset;
		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
		tmpdiv = r5or6_first_row_offset;
		do_div(tmpdiv, strip_size);
		first_column = r5or6_first_column = tmpdiv;
		tmpdiv = r5or6_last_row_offset;
		do_div(tmpdiv, strip_size);
		r5or6_last_column = tmpdiv;
#else
		first_row_offset = r5or6_first_row_offset =
			(u32)((first_block % stripesize) %
			r5or6_blocks_per_row);

		r5or6_last_row_offset =
			(u32)((last_block % stripesize) %
			r5or6_blocks_per_row);

		first_column = r5or6_first_row_offset / strip_size;
		r5or6_first_column = first_column;
		r5or6_last_column = r5or6_last_row_offset / strip_size;
#endif
		if (r5or6_first_column != r5or6_last_column)
			return PQI_RAID_BYPASS_INELIGIBLE;

		/* Request is eligible */
		map_row =
			((u32)(first_row >> raid_map->parity_rotation_shift)) %
			get_unaligned_le16(&raid_map->row_cnt);

		map_index = (first_group *
			(get_unaligned_le16(&raid_map->row_cnt) *
			total_disks_per_row)) +
			(map_row * total_disks_per_row) + first_column;
	}

	aio_handle = raid_map->disk_data[map_index].aio_handle;
	disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
		first_row * strip_size +
		(first_row_offset - first_column * strip_size);
	disk_block_cnt = block_cnt;

	/* Handle differing logical/physical block sizes. */
	if (raid_map->phys_blk_shift) {
		disk_block <<= raid_map->phys_blk_shift;
		disk_block_cnt <<= raid_map->phys_blk_shift;
	}

	if (unlikely(disk_block_cnt > 0xffff))
		return PQI_RAID_BYPASS_INELIGIBLE;

	/* Build the new CDB for the physical disk I/O. */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		put_unaligned_be64(disk_block, &cdb[2]);
		put_unaligned_be32(disk_block_cnt, &cdb[10]);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_length = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		put_unaligned_be32((u32)disk_block, &cdb[2]);
		cdb[6] = 0;
		put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
		cdb[9] = 0;
		cdb_length = 10;
	}

	if (get_unaligned_le16(&raid_map->flags) &
		RAID_MAP_ENCRYPTION_ENABLED) {
		pqi_set_encryption_info(&encryption_info, raid_map,
			first_block);
		encryption_info_ptr = &encryption_info;
	} else {
		encryption_info_ptr = NULL;
	}

	return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
		cdb, cdb_length, queue_group, encryption_info_ptr, true);
}
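/*
 * Illustrative example of the mapping above (hypothetical geometry, not
 * taken from any controller): a RAID-0 volume with data_disks_per_row = 4
 * and strip_size = 128 gives blocks_per_row = 512.  A read of 8 blocks at
 * LBA 1030 lands in first_row = 2 (1030 / 512) with first_row_offset = 6
 * and first_column = 0; the last block (LBA 1037) resolves to the same
 * row and column, so the request is eligible for bypass and goes straight
 * to the disk at map_index, at
 * disk_block = disk_starting_blk + (2 * 128) + 6.
 */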
#define PQI_STATUS_IDLE		0x0

#define PQI_CREATE_ADMIN_QUEUE_PAIR	1
#define PQI_DELETE_ADMIN_QUEUE_PAIR	2

#define PQI_DEVICE_STATE_POWER_ON_AND_RESET		0x0
#define PQI_DEVICE_STATE_STATUS_AVAILABLE		0x1
#define PQI_DEVICE_STATE_ALL_REGISTERS_READY		0x2
#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY	0x3
#define PQI_DEVICE_STATE_ERROR				0x4

#define PQI_MODE_READY_TIMEOUT_SECS		30
#define PQI_MODE_READY_POLL_INTERVAL_MSECS	1
static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	u64 signature;
	u8 status;

	pqi_registers = ctrl_info->pqi_registers;
	timeout = (PQI_MODE_READY_TIMEOUT_SECS * PQI_HZ) + jiffies;

	while (1) {
		signature = readq(&pqi_registers->signature);
		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
			sizeof(signature)) == 0)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI signature\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI IDLE\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	while (1) {
		if (readl(&pqi_registers->device_status) ==
			PQI_DEVICE_STATE_ALL_REGISTERS_READY)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for PQI all registers ready\n");
			return -ETIMEDOUT;
		}
		msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
	}

	return 0;
}
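/*
 * The three polling loops above walk the controller through the PQI
 * readiness sequence in order: a valid device signature first, then the
 * IDLE function/status code, then the all-registers-ready device state.
 * Note that all three loops share a single deadline computed from
 * PQI_MODE_READY_TIMEOUT_SECS, so a controller that is slow in an early
 * stage has correspondingly less of the budget left for the later ones.
 */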
static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
{
	struct pqi_scsi_dev *device;

	device = io_request->scmd->device->hostdata;
	device->raid_bypass_enabled = false;
	device->aio_enabled = false;
}
static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	if (device->device_offline)
		return;

	device->device_offline = true;
	ctrl_info = shost_to_hba(sdev->host);
	pqi_schedule_rescan_worker(ctrl_info);
	dev_err(&ctrl_info->pci_dev->dev, "re-scanning %s scsi %d:%d:%d:%d\n",
		path, ctrl_info->scsi_host->host_no, device->bus,
		device->target, device->lun);
}
static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_raid_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	struct scsi_sense_hdr sshdr;

	scmd = io_request->scmd;
	if (!scmd)
		return;

	error_info = io_request->error_info;
	scsi_status = error_info->status;
	host_byte = DID_OK;

	switch (error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		xfer_count =
			get_unaligned_le32(&error_info->data_out_transferred);
		residual_count = scsi_bufflen(scmd) - xfer_count;
		scsi_set_resid(scmd, residual_count);
		if (xfer_count < scmd->underflow)
			host_byte = DID_SOFT_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
	case PQI_DATA_IN_OUT_ABORTED:
		host_byte = DID_ABORT;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		host_byte = DID_TIME_OUT;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
	default:
		host_byte = DID_ERROR;
		break;
	}

	sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&error_info->response_data_length);
	if (sense_data_length) {
		if (sense_data_length > sizeof(error_info->data))
			sense_data_length = sizeof(error_info->data);

		if (scsi_status == SAM_STAT_CHECK_CONDITION &&
			scsi_normalize_sense(error_info->data,
				sense_data_length, &sshdr) &&
				sshdr.sense_key == HARDWARE_ERROR &&
				sshdr.asc == 0x3e) {
			struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
			struct pqi_scsi_dev *device = scmd->device->hostdata;

			switch (sshdr.ascq) {
			case 0x1: /* LOGICAL UNIT FAILURE */
				if (printk_ratelimit())
					scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
						ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
				pqi_take_device_offline(scmd->device, "RAID");
				host_byte = DID_NO_CONNECT;
				break;

			default: /* See http://www.t10.org/lists/asc-num.htm#ASC_3E */
				if (printk_ratelimit())
					scmd_printk(KERN_ERR, scmd, "received unhandled error %d from controller for scsi %d:%d:%d:%d\n",
						sshdr.ascq, ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
				break;
			}
		}

		if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
			sense_data_length = SCSI_SENSE_BUFFERSIZE;
		memcpy(scmd->sense_buffer, error_info->data,
			sense_data_length);
	}

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
{
	u8 scsi_status;
	u8 host_byte;
	struct scsi_cmnd *scmd;
	struct pqi_aio_error_info *error_info;
	size_t sense_data_length;
	int residual_count;
	int xfer_count;
	bool device_offline;

	scmd = io_request->scmd;
	error_info = io_request->error_info;
	host_byte = DID_OK;
	sense_data_length = 0;
	device_offline = false;

	switch (error_info->service_response) {
	case PQI_AIO_SERV_RESPONSE_COMPLETE:
		scsi_status = error_info->status;
		break;
	case PQI_AIO_SERV_RESPONSE_FAILURE:
		switch (error_info->status) {
		case PQI_AIO_STATUS_IO_ABORTED:
			scsi_status = SAM_STAT_TASK_ABORTED;
			break;
		case PQI_AIO_STATUS_UNDERRUN:
			scsi_status = SAM_STAT_GOOD;
			residual_count = get_unaligned_le32(
						&error_info->residual_count);
			scsi_set_resid(scmd, residual_count);
			xfer_count = scsi_bufflen(scmd) - residual_count;
			if (xfer_count < scmd->underflow)
				host_byte = DID_SOFT_ERROR;
			break;
		case PQI_AIO_STATUS_OVERRUN:
			scsi_status = SAM_STAT_GOOD;
			break;
		case PQI_AIO_STATUS_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			scsi_status = SAM_STAT_GOOD;
			io_request->status = -EAGAIN;
			break;
		case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
		case PQI_AIO_STATUS_INVALID_DEVICE:
			if (!io_request->raid_bypass) {
				device_offline = true;
				pqi_take_device_offline(scmd->device, "AIO");
				host_byte = DID_NO_CONNECT;
			}
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		case PQI_AIO_STATUS_IO_ERROR:
		default:
			scsi_status = SAM_STAT_CHECK_CONDITION;
			break;
		}
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
	case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
		scsi_status = SAM_STAT_GOOD;
		break;
	case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
	case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
	default:
		scsi_status = SAM_STAT_CHECK_CONDITION;
		break;
	}

	if (error_info->data_present) {
		sense_data_length =
			get_unaligned_le16(&error_info->data_length);
		if (sense_data_length) {
			if (sense_data_length > sizeof(error_info->data))
				sense_data_length = sizeof(error_info->data);
			if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
				sense_data_length = SCSI_SENSE_BUFFERSIZE;
			memcpy(scmd->sense_buffer, error_info->data,
				sense_data_length);
		}
	}

	if (device_offline && sense_data_length == 0)
		scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
			0x3e, 0x1);

	scmd->result = scsi_status;
	set_host_byte(scmd, host_byte);
}
static void pqi_process_io_error(unsigned int iu_type,
	struct pqi_io_request *io_request)
{
	switch (iu_type) {
	case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		pqi_process_raid_io_error(io_request);
		break;
	case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
		pqi_process_aio_io_error(io_request);
		break;
	}
}
static int pqi_interpret_task_management_response(
	struct pqi_task_management_response *response)
{
	int rc;

	switch (response->response_code) {
	case SOP_TMF_COMPLETE:
	case SOP_TMF_FUNCTION_SUCCEEDED:
		rc = 0;
		break;
	case SOP_TMF_REJECTED:
		rc = -EAGAIN;
		break;
	default:
		rc = -EIO;
		break;
	}

	return rc;
}
static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
{
	pqi_take_ctrl_offline(ctrl_info);
}
static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
{
	int num_responses;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_io_request *io_request;
	struct pqi_io_response *response;
	u16 request_id;

	num_responses = 0;
	oq_ci = queue_group->oq_ci_copy;

	while (1) {
		oq_pi = readl(queue_group->oq_pi);
		if (oq_pi >= ctrl_info->num_elements_per_oq) {
			pqi_invalid_response(ctrl_info);
			dev_err(&ctrl_info->pci_dev->dev,
				"I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
				oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
			return -1;
		}
		if (oq_pi == oq_ci)
			break;

		num_responses++;
		response = queue_group->oq_element_array +
			(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);

		request_id = get_unaligned_le16(&response->request_id);
		if (request_id >= ctrl_info->max_io_slots) {
			pqi_invalid_response(ctrl_info);
			dev_err(&ctrl_info->pci_dev->dev,
				"request ID in response (%u) out of range (0-%u): producer index: %u  consumer index: %u\n",
				request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
			return -1;
		}

		io_request = &ctrl_info->io_request_pool[request_id];
		if (atomic_read(&io_request->refcount) == 0) {
			pqi_invalid_response(ctrl_info);
			dev_err(&ctrl_info->pci_dev->dev,
				"request ID in response (%u) does not match an outstanding I/O request: producer index: %u  consumer index: %u\n",
				request_id, oq_pi, oq_ci);
			return -1;
		}

		switch (response->header.iu_type) {
		case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
		case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
			if (io_request->scmd)
				io_request->scmd->result = 0;
			/* fall through */
		case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
			break;
		case PQI_RESPONSE_IU_VENDOR_GENERAL:
			io_request->status =
				get_unaligned_le16(
				&((struct pqi_vendor_general_response *)
					response)->status);
			break;
		case PQI_RESPONSE_IU_TASK_MANAGEMENT:
			io_request->status =
				pqi_interpret_task_management_response(
					(void *)response);
			break;
		case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
			pqi_aio_path_disabled(io_request);
			io_request->status = -EAGAIN;
			break;
		case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
		case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
			io_request->error_info = ctrl_info->error_buffer +
				(get_unaligned_le16(&response->error_index) *
				PQI_ERROR_BUFFER_ELEMENT_LENGTH);
			pqi_process_io_error(response->header.iu_type, io_request);
			break;
		default:
			pqi_invalid_response(ctrl_info);
			dev_err(&ctrl_info->pci_dev->dev,
				"unexpected IU type: 0x%x: producer index: %u  consumer index: %u\n",
				response->header.iu_type, oq_pi, oq_ci);
			return -1;
		}

		io_request->io_complete_callback(io_request, io_request->context);

		/*
		 * Note that the I/O request structure CANNOT BE TOUCHED after
		 * returning from the I/O completion callback!
		 */
		oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
	}

	if (num_responses) {
		queue_group->oq_ci_copy = oq_ci;
		writel(oq_ci, queue_group->oq_ci);
	}

	return num_responses;
}
static inline unsigned int pqi_num_elements_free(unsigned int pi,
	unsigned int ci, unsigned int elements_in_queue)
{
	unsigned int num_elements_used;

	if (pi >= ci)
		num_elements_used = pi - ci;
	else
		num_elements_used = elements_in_queue - ci + pi;

	return elements_in_queue - num_elements_used - 1;
}
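/*
 * Illustrative example (hypothetical values): with a 32-element queue,
 * pi = 5 and ci = 30, the used count wraps: 32 - 30 + 5 = 7 elements are
 * in flight, leaving 32 - 7 - 1 = 24 free.  One element is always
 * sacrificed so that a full queue (pi one behind ci) can be distinguished
 * from an empty one (pi == ci).
 */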
static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event_acknowledge_request *iu, size_t iu_length)
{
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	unsigned long flags;
	void *next_element;
	struct pqi_queue_group *queue_group;

	queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
	put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);

	while (1) {
		spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);

		iq_pi = queue_group->iq_pi_copy[RAID_PATH];
		iq_ci = readl(queue_group->iq_ci[RAID_PATH]);

		if (pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		spin_unlock_irqrestore(
			&queue_group->submit_lock[RAID_PATH], flags);

		if (pqi_ctrl_offline(ctrl_info))
			return;
	}

	next_element = queue_group->iq_element_array[RAID_PATH] +
		(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	memcpy(next_element, iu, iu_length);

	iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
	queue_group->iq_pi_copy[RAID_PATH] = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, queue_group->iq_pi[RAID_PATH]);

	spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
}
static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	struct pqi_event_acknowledge_request request;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	request.event_type = event->event_type;
	request.event_id = event->event_id;
	request.additional_event_id = event->additional_event_id;

	pqi_send_event_ack(ctrl_info, &request, sizeof(request));
}
#define PQI_SOFT_RESET_STATUS_TIMEOUT_SECS		30
#define PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS	1

static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status(
	struct pqi_ctrl_info *ctrl_info)
{
	unsigned long timeout;
	u8 status;

	timeout = (PQI_SOFT_RESET_STATUS_TIMEOUT_SECS * PQI_HZ) + jiffies;

	while (1) {
		status = pqi_read_soft_reset_status(ctrl_info);
		if (status & PQI_SOFT_RESET_INITIATE)
			return RESET_INITIATE_DRIVER;

		if (status & PQI_SOFT_RESET_ABORT)
			return RESET_ABORT;

		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for soft reset status\n");
			return RESET_TIMEDOUT;
		}

		if (!sis_is_firmware_running(ctrl_info))
			return RESET_NORESPONSE;

		ssleep(PQI_SOFT_RESET_STATUS_POLL_INTERVAL_SECS);
	}
}
static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info,
	enum pqi_soft_reset_status reset_status)
{
	int rc;

	switch (reset_status) {
	case RESET_INITIATE_DRIVER:
	case RESET_TIMEDOUT:
		dev_info(&ctrl_info->pci_dev->dev,
			"resetting controller %u\n", ctrl_info->ctrl_id);
		sis_soft_reset(ctrl_info);
		/* fall through */
	case RESET_INITIATE_FIRMWARE:
		rc = pqi_ofa_ctrl_restart(ctrl_info);
		pqi_ofa_free_host_buffer(ctrl_info);
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation for controller %u: %s\n",
			ctrl_info->ctrl_id, rc == 0 ? "SUCCESS" : "FAILED");
		break;
	case RESET_ABORT:
		pqi_ofa_ctrl_unquiesce(ctrl_info);
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation for controller %u: %s\n",
			ctrl_info->ctrl_id, "ABORTED");
		break;
	case RESET_NORESPONSE:
		pqi_ofa_free_host_buffer(ctrl_info);
		pqi_take_ctrl_offline(ctrl_info);
		break;
	}
}
static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info,
	struct pqi_event *event)
{
	u16 event_id;
	enum pqi_soft_reset_status status;

	event_id = get_unaligned_le16(&event->event_id);

	mutex_lock(&ctrl_info->ofa_mutex);

	if (event_id == PQI_EVENT_OFA_QUIESCE) {
		dev_info(&ctrl_info->pci_dev->dev,
			"Received Online Firmware Activation quiesce event for controller %u\n",
			ctrl_info->ctrl_id);
		pqi_ofa_ctrl_quiesce(ctrl_info);
		pqi_acknowledge_event(ctrl_info, event);
		if (ctrl_info->soft_reset_handshake_supported) {
			status = pqi_poll_for_soft_reset_status(ctrl_info);
			pqi_process_soft_reset(ctrl_info, status);
		} else {
			pqi_process_soft_reset(ctrl_info,
					RESET_INITIATE_FIRMWARE);
		}

	} else if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
		pqi_acknowledge_event(ctrl_info, event);
		pqi_ofa_setup_host_buffer(ctrl_info,
			le32_to_cpu(event->ofa_bytes_requested));
		pqi_ofa_host_memory_update(ctrl_info);
	} else if (event_id == PQI_EVENT_OFA_CANCELLED) {
		pqi_ofa_free_host_buffer(ctrl_info);
		pqi_acknowledge_event(ctrl_info, event);
		dev_info(&ctrl_info->pci_dev->dev,
			"Online Firmware Activation(%u) cancel reason : %u\n",
			ctrl_info->ctrl_id, event->ofa_cancel_reason);
	}

	mutex_unlock(&ctrl_info->ofa_mutex);
}
static void pqi_event_worker(struct work_struct *work)
{
	unsigned int i;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_event *event;

	ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);

	pqi_ctrl_busy(ctrl_info);
	pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
	if (pqi_ctrl_offline(ctrl_info))
		goto out;

	pqi_schedule_rescan_worker_delayed(ctrl_info);

	event = ctrl_info->events;
	for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
		if (event->pending) {
			event->pending = false;
			if (event->event_type == PQI_EVENT_TYPE_OFA) {
				pqi_ctrl_unbusy(ctrl_info);
				pqi_ofa_process_event(ctrl_info, event);
				return;
			}
			pqi_acknowledge_event(ctrl_info, event);
		}
		event++;
	}

out:
	pqi_ctrl_unbusy(ctrl_info);
}
#define PQI_HEARTBEAT_TIMER_INTERVAL	(10 * PQI_HZ)

static void pqi_heartbeat_timer_handler(struct timer_list *t)
{
	int num_interrupts;
	u32 heartbeat_count;
	struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t,
						     heartbeat_timer);

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info))
		return;

	num_interrupts = atomic_read(&ctrl_info->num_interrupts);
	heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);

	if (num_interrupts == ctrl_info->previous_num_interrupts) {
		if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
			dev_err(&ctrl_info->pci_dev->dev,
				"no heartbeat detected - last heartbeat count: %u\n",
				heartbeat_count);
			pqi_take_ctrl_offline(ctrl_info);
			return;
		}
	} else {
		ctrl_info->previous_num_interrupts = num_interrupts;
	}

	ctrl_info->previous_heartbeat_count = heartbeat_count;
	mod_timer(&ctrl_info->heartbeat_timer,
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
}
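/*
 * Liveness heuristic used above: the controller is only declared dead
 * when both signals stall across one 10-second interval - no new
 * interrupts were delivered *and* the firmware heartbeat counter did not
 * advance.  If interrupts are still arriving, the heartbeat comparison is
 * skipped for this interval; the heartbeat snapshot is refreshed either
 * way before the timer is rearmed.
 */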
static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return;

	ctrl_info->previous_num_interrupts =
		atomic_read(&ctrl_info->num_interrupts);
	ctrl_info->previous_heartbeat_count =
		pqi_read_heartbeat_counter(ctrl_info);

	ctrl_info->heartbeat_timer.expires =
		jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
	add_timer(&ctrl_info->heartbeat_timer);
}

static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
{
	del_timer_sync(&ctrl_info->heartbeat_timer);
}
static inline int pqi_event_type_to_event_index(unsigned int event_type)
{
	int index;

	for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
		if (event_type == pqi_supported_event_types[index])
			return index;

	return -1;
}

static inline bool pqi_is_supported_event(unsigned int event_type)
{
	return pqi_event_type_to_event_index(event_type) != -1;
}
static void pqi_ofa_capture_event_payload(struct pqi_event *event,
	struct pqi_event_response *response)
{
	u16 event_id;

	event_id = get_unaligned_le16(&event->event_id);

	if (event->event_type == PQI_EVENT_TYPE_OFA) {
		if (event_id == PQI_EVENT_OFA_MEMORY_ALLOCATION) {
			event->ofa_bytes_requested =
			response->data.ofa_memory_allocation.bytes_requested;
		} else if (event_id == PQI_EVENT_OFA_CANCELLED) {
			event->ofa_cancel_reason =
				response->data.ofa_cancelled.reason;
		}
	}
}
static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
	int num_events;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	struct pqi_event_queue *event_queue;
	struct pqi_event_response *response;
	struct pqi_event *event;
	int event_index;

	event_queue = &ctrl_info->event_queue;
	num_events = 0;
	oq_ci = event_queue->oq_ci_copy;

	while (1) {
		oq_pi = readl(event_queue->oq_pi);
		if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
			pqi_invalid_response(ctrl_info);
			dev_err(&ctrl_info->pci_dev->dev,
				"event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
				oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
			return -1;
		}

		if (oq_pi == oq_ci)
			break;

		num_events++;
		response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);

		event_index =
			pqi_event_type_to_event_index(response->event_type);

		if (event_index >= 0 && response->request_acknowledge) {
			event = &ctrl_info->events[event_index];
			event->pending = true;
			event->event_type = response->event_type;
			event->event_id = response->event_id;
			event->additional_event_id = response->additional_event_id;
			if (event->event_type == PQI_EVENT_TYPE_OFA)
				pqi_ofa_capture_event_payload(event, response);
		}

		oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
	}

	if (num_events) {
		event_queue->oq_ci_copy = oq_ci;
		writel(oq_ci, event_queue->oq_ci);
		schedule_work(&ctrl_info->event_work);
	}

	return num_events;
}
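/*
 * Note that the interrupt path above only latches events into
 * ctrl_info->events[] and rearms the queue; the actual processing (and
 * any acknowledgement traffic back to the controller) happens later in
 * pqi_event_worker() via schedule_work(), keeping this hard-IRQ path
 * short.
 */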
#define PQI_LEGACY_INTX_MASK	0x1

static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
	bool enable_intx)
{
	u32 intx_mask;
	struct pqi_device_registers __iomem *pqi_registers;
	volatile void __iomem *register_addr;

	pqi_registers = ctrl_info->pqi_registers;

	if (enable_intx)
		register_addr = &pqi_registers->legacy_intx_mask_clear;
	else
		register_addr = &pqi_registers->legacy_intx_mask_set;

	intx_mask = readl(register_addr);
	intx_mask |= PQI_LEGACY_INTX_MASK;
	writel(intx_mask, register_addr);
}
static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_irq_mode new_mode)
{
	switch (ctrl_info->irq_mode) {
	case IRQ_MODE_MSIX:
		switch (new_mode) {
		case IRQ_MODE_MSIX:
			break;
		case IRQ_MODE_INTX:
			pqi_configure_legacy_intx(ctrl_info, true);
			sis_enable_intx(ctrl_info);
			break;
		case IRQ_MODE_NONE:
			break;
		}
		break;
	case IRQ_MODE_INTX:
		switch (new_mode) {
		case IRQ_MODE_MSIX:
			pqi_configure_legacy_intx(ctrl_info, false);
			sis_enable_msix(ctrl_info);
			break;
		case IRQ_MODE_INTX:
			break;
		case IRQ_MODE_NONE:
			pqi_configure_legacy_intx(ctrl_info, false);
			break;
		}
		break;
	case IRQ_MODE_NONE:
		switch (new_mode) {
		case IRQ_MODE_MSIX:
			sis_enable_msix(ctrl_info);
			break;
		case IRQ_MODE_INTX:
			pqi_configure_legacy_intx(ctrl_info, true);
			sis_enable_intx(ctrl_info);
			break;
		case IRQ_MODE_NONE:
			break;
		}
		break;
	}

	ctrl_info->irq_mode = new_mode;
}
#define PQI_LEGACY_INTX_PENDING		0x1

static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
{
	bool valid_irq;
	u32 intx_status;

	switch (ctrl_info->irq_mode) {
	case IRQ_MODE_MSIX:
		valid_irq = true;
		break;
	case IRQ_MODE_INTX:
		intx_status =
			readl(&ctrl_info->pqi_registers->legacy_intx_status);
		if (intx_status & PQI_LEGACY_INTX_PENDING)
			valid_irq = true;
		else
			valid_irq = false;
		break;
	case IRQ_MODE_NONE:
	default:
		valid_irq = false;
		break;
	}

	return valid_irq;
}
static irqreturn_t pqi_irq_handler(int irq, void *data)
{
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;
	int num_io_responses_handled;
	int num_events_handled;

	queue_group = data;
	ctrl_info = queue_group->ctrl_info;

	if (!pqi_is_valid_irq(ctrl_info))
		return IRQ_NONE;

	num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
	if (num_io_responses_handled < 0)
		goto out;

	if (irq == ctrl_info->event_irq) {
		num_events_handled = pqi_process_event_intr(ctrl_info);
		if (num_events_handled < 0)
			goto out;
	} else {
		num_events_handled = 0;
	}

	if (num_io_responses_handled + num_events_handled > 0)
		atomic_inc(&ctrl_info->num_interrupts);

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
	pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);

out:
	return IRQ_HANDLED;
}
static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
{
	struct pci_dev *pci_dev = ctrl_info->pci_dev;
	int i;
	int rc;

	ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);

	for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
		rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
			DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
		if (rc) {
			dev_err(&pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_irq_vector(pci_dev, i), rc);
			return rc;
		}
		ctrl_info->num_msix_vectors_initialized++;
	}

	return 0;
}
static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
{
	int i;

	for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
		free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
			&ctrl_info->queue_groups[i]);

	ctrl_info->num_msix_vectors_initialized = 0;
}
static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	int num_vectors_enabled;

	num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
			PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (num_vectors_enabled < 0) {
		dev_err(&ctrl_info->pci_dev->dev,
			"MSI-X init failed with error %d\n",
			num_vectors_enabled);
		return num_vectors_enabled;
	}

	ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
	ctrl_info->irq_mode = IRQ_MODE_MSIX;

	return 0;
}
static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->num_msix_vectors_enabled) {
		pci_free_irq_vectors(ctrl_info->pci_dev);
		ctrl_info->num_msix_vectors_enabled = 0;
	}
}
static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	size_t alloc_length;
	size_t element_array_length_per_iq;
	size_t element_array_length_per_oq;
	void *element_array;
	void __iomem *next_queue_index;
	void *aligned_pointer;
	unsigned int num_inbound_queues;
	unsigned int num_outbound_queues;
	unsigned int num_queue_indexes;
	struct pqi_queue_group *queue_group;

	element_array_length_per_iq =
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_iq;
	element_array_length_per_oq =
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
		ctrl_info->num_elements_per_oq;
	num_inbound_queues = ctrl_info->num_queue_groups * 2;
	num_outbound_queues = ctrl_info->num_queue_groups;
	num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;

	aligned_pointer = NULL;

	for (i = 0; i < num_inbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_iq;
	}

	for (i = 0; i < num_outbound_queues; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		aligned_pointer += element_array_length_per_oq;
	}

	aligned_pointer = PTR_ALIGN(aligned_pointer,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	for (i = 0; i < num_queue_indexes; i++) {
		aligned_pointer = PTR_ALIGN(aligned_pointer,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		aligned_pointer += sizeof(pqi_index_t);
	}

	alloc_length = (size_t)aligned_pointer +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	alloc_length += PQI_EXTRA_SGL_MEMORY;

	ctrl_info->queue_memory_base =
		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
				   &ctrl_info->queue_memory_base_dma_handle,
				   GFP_KERNEL);

	if (!ctrl_info->queue_memory_base)
		return -ENOMEM;

	ctrl_info->queue_memory_length = alloc_length;

	element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_element_array[RAID_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
				(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
		queue_group->iq_element_array[AIO_PATH] = element_array;
		queue_group->iq_element_array_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
				(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_iq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->oq_element_array = element_array;
		queue_group->oq_element_array_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(element_array - ctrl_info->queue_memory_base);
		element_array += element_array_length_per_oq;
		element_array = PTR_ALIGN(element_array,
			PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	}

	ctrl_info->event_queue.oq_element_array = element_array;
	ctrl_info->event_queue.oq_element_array_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(element_array - ctrl_info->queue_memory_base);
	element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
		PQI_EVENT_OQ_ELEMENT_LENGTH;

	next_queue_index = (void __iomem *)PTR_ALIGN(element_array,
		PQI_OPERATIONAL_INDEX_ALIGNMENT);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];
		queue_group->iq_ci[RAID_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[RAID_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index -
			(void __iomem *)ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->iq_ci[AIO_PATH] = next_queue_index;
		queue_group->iq_ci_bus_addr[AIO_PATH] =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index -
			(void __iomem *)ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
		queue_group->oq_pi = next_queue_index;
		queue_group->oq_pi_bus_addr =
			ctrl_info->queue_memory_base_dma_handle +
			(next_queue_index -
			(void __iomem *)ctrl_info->queue_memory_base);
		next_queue_index += sizeof(pqi_index_t);
		next_queue_index = PTR_ALIGN(next_queue_index,
			PQI_OPERATIONAL_INDEX_ALIGNMENT);
	}

	ctrl_info->event_queue.oq_pi = next_queue_index;
	ctrl_info->event_queue.oq_pi_bus_addr =
		ctrl_info->queue_memory_base_dma_handle +
		(next_queue_index -
		(void __iomem *)ctrl_info->queue_memory_base);

	return 0;
}
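/*
 * Illustrative sizing example for the function above (hypothetical
 * values): with 2 queue groups there are 4 inbound element arrays, 2
 * outbound arrays, one event-queue array, and (2 * 3) + 1 = 7 queue
 * indexes.  The first pass walks a NULL-based pointer through the same
 * PTR_ALIGN()/advance steps that the second pass later takes through real
 * memory, so the final pointer value is the worst-case size of the single
 * DMA-coherent block that backs all of the arrays and indexes.
 */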
static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
	u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;

	/*
	 * Initialize the backpointers to the controller structure in
	 * each operational queue group structure.
	 */
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].ctrl_info = ctrl_info;

	/*
	 * Assign IDs to all operational queues.  Note that the IDs
	 * assigned to operational IQs are independent of the IDs
	 * assigned to operational OQs.
	 */
	ctrl_info->event_queue.oq_id = next_oq_id++;
	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
		ctrl_info->queue_groups[i].oq_id = next_oq_id++;
	}

	/*
	 * Assign MSI-X table entry indexes to all queues.  Note that the
	 * interrupt for the event queue is shared with the first queue group.
	 */
	ctrl_info->event_queue.int_msg_num = 0;
	for (i = 0; i < ctrl_info->num_queue_groups; i++)
		ctrl_info->queue_groups[i].int_msg_num = i;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
		spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
		INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
	}
}
static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	size_t alloc_length;
	struct pqi_admin_queues_aligned *admin_queues_aligned;
	struct pqi_admin_queues *admin_queues;

	alloc_length = sizeof(struct pqi_admin_queues_aligned) +
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;

	ctrl_info->admin_queue_memory_base =
		dma_alloc_coherent(&ctrl_info->pci_dev->dev, alloc_length,
				   &ctrl_info->admin_queue_memory_base_dma_handle,
				   GFP_KERNEL);

	if (!ctrl_info->admin_queue_memory_base)
		return -ENOMEM;

	ctrl_info->admin_queue_memory_length = alloc_length;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
		PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
	admin_queues->iq_element_array =
		&admin_queues_aligned->iq_element_array;
	admin_queues->oq_element_array =
		&admin_queues_aligned->oq_element_array;
	admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
	admin_queues->oq_pi =
		(pqi_index_t __iomem *)&admin_queues_aligned->oq_pi;

	admin_queues->iq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->iq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_element_array_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		(admin_queues->oq_element_array -
		ctrl_info->admin_queue_memory_base);
	admin_queues->iq_ci_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void *)admin_queues->iq_ci -
		ctrl_info->admin_queue_memory_base);
	admin_queues->oq_pi_bus_addr =
		ctrl_info->admin_queue_memory_base_dma_handle +
		((void __iomem *)admin_queues->oq_pi -
		(void __iomem *)ctrl_info->admin_queue_memory_base);

	return 0;
}
#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES		PQI_HZ
#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS	1

static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_device_registers __iomem *pqi_registers;
	struct pqi_admin_queues *admin_queues;
	unsigned long timeout;
	u8 status;
	u32 reg;

	pqi_registers = ctrl_info->pqi_registers;
	admin_queues = &ctrl_info->admin_queues;

	writeq((u64)admin_queues->iq_element_array_bus_addr,
		&pqi_registers->admin_iq_element_array_addr);
	writeq((u64)admin_queues->oq_element_array_bus_addr,
		&pqi_registers->admin_oq_element_array_addr);
	writeq((u64)admin_queues->iq_ci_bus_addr,
		&pqi_registers->admin_iq_ci_addr);
	writeq((u64)admin_queues->oq_pi_bus_addr,
		&pqi_registers->admin_oq_pi_addr);

	reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
		(PQI_ADMIN_OQ_NUM_ELEMENTS << 8) |
		(admin_queues->int_msg_num << 16);
	writel(reg, &pqi_registers->admin_iq_num_elements);
	writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
		&pqi_registers->function_and_status_code);

	timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
	while (1) {
		status = readb(&pqi_registers->function_and_status_code);
		if (status == PQI_STATUS_IDLE)
			break;
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
	}

	/*
	 * The offset registers are not initialized to the correct
	 * offsets until *after* the create admin queue pair command
	 * completes successfully.
	 */
	admin_queues->iq_pi = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_iq_pi_offset);
	admin_queues->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		readq(&pqi_registers->admin_oq_ci_offset);

	return 0;
}
static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request)
{
	struct pqi_admin_queues *admin_queues;
	void *next_element;
	pqi_index_t iq_pi;

	admin_queues = &ctrl_info->admin_queues;
	iq_pi = admin_queues->iq_pi_copy;

	next_element = admin_queues->iq_element_array +
		(iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);

	memcpy(next_element, request, sizeof(*request));

	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
	admin_queues->iq_pi_copy = iq_pi;

	/*
	 * This write notifies the controller that an IU is available to be
	 * processed.
	 */
	writel(iq_pi, admin_queues->iq_pi);
}
#define PQI_ADMIN_REQUEST_TIMEOUT_SECS	60

static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_response *response)
{
	struct pqi_admin_queues *admin_queues;
	pqi_index_t oq_pi;
	pqi_index_t oq_ci;
	unsigned long timeout;

	admin_queues = &ctrl_info->admin_queues;
	oq_ci = admin_queues->oq_ci_copy;

	timeout = (PQI_ADMIN_REQUEST_TIMEOUT_SECS * PQI_HZ) + jiffies;

	while (1) {
		oq_pi = readl(admin_queues->oq_pi);
		if (oq_pi != oq_ci)
			break;
		if (time_after(jiffies, timeout)) {
			dev_err(&ctrl_info->pci_dev->dev,
				"timed out waiting for admin response\n");
			return -ETIMEDOUT;
		}
		if (!sis_is_firmware_running(ctrl_info))
			return -ENXIO;
		usleep_range(1000, 2000);
	}

	memcpy(response, admin_queues->oq_element_array +
		(oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));

	oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
	admin_queues->oq_ci_copy = oq_ci;
	writel(oq_ci, admin_queues->oq_ci);

	return 0;
}
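/*
 * Note that the admin queue pair is serviced purely by polling - a
 * readl() of the outbound producer index with a short sleep between
 * samples - since it is exercised during initialization and recovery,
 * before the operational queues and their MSI-X vectors are set up.
 */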
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request)
{
	struct pqi_io_request *next;
	void *next_element;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;
	size_t iu_length;
	unsigned long flags;
	unsigned int num_elements_needed;
	unsigned int num_elements_to_end_of_queue;
	size_t copy_count;
	struct pqi_iu_header *request;

	spin_lock_irqsave(&queue_group->submit_lock[path], flags);

	if (io_request) {
		io_request->queue_group = queue_group;
		list_add_tail(&io_request->request_list_entry,
			&queue_group->request_list[path]);
	}

	iq_pi = queue_group->iq_pi_copy[path];

	list_for_each_entry_safe(io_request, next,
		&queue_group->request_list[path], request_list_entry) {

		request = io_request->iu;

		iu_length = get_unaligned_le16(&request->iu_length) +
			PQI_REQUEST_HEADER_LENGTH;
		num_elements_needed =
			DIV_ROUND_UP(iu_length,
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		iq_ci = readl(queue_group->iq_ci[path]);

		if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
			ctrl_info->num_elements_per_iq))
			break;

		put_unaligned_le16(queue_group->oq_id,
			&request->response_queue_id);

		next_element = queue_group->iq_element_array[path] +
			(iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

		num_elements_to_end_of_queue =
			ctrl_info->num_elements_per_iq - iq_pi;

		if (num_elements_needed <= num_elements_to_end_of_queue) {
			memcpy(next_element, request, iu_length);
		} else {
			copy_count = num_elements_to_end_of_queue *
				PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
			memcpy(next_element, request, copy_count);
			memcpy(queue_group->iq_element_array[path],
				(u8 *)request + copy_count,
				iu_length - copy_count);
		}

		iq_pi = (iq_pi + num_elements_needed) %
			ctrl_info->num_elements_per_iq;

		list_del(&io_request->request_list_entry);
	}

	if (iq_pi != queue_group->iq_pi_copy[path]) {
		queue_group->iq_pi_copy[path] = iq_pi;

		/*
		 * This write notifies the controller that one or more IUs are
		 * available to be processed.
		 */
		writel(iq_pi, queue_group->iq_pi[path]);
	}

	spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
}
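/*
 * Illustrative wraparound example for the split copy above (hypothetical
 * sizes): with num_elements_per_iq = 8 and a spanning IU needing 3
 * elements at iq_pi = 6, only 2 elements remain before the end of the
 * ring, so copy_count = 2 * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH bytes go to
 * elements 6-7 and the remaining bytes continue at element 0; iq_pi then
 * advances to (6 + 3) % 8 = 1.
 */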
#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS		10

static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
	struct completion *wait)
{
	int rc;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * PQI_HZ)) {
			rc = 0;
			break;
		}

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}
	}

	return rc;
}

static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}
static int pqi_process_raid_io_error_synchronous(
	struct pqi_raid_error_info *error_info)
{
	int rc = -EIO;

	switch (error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		if (error_info->status == SAM_STAT_GOOD)
			rc = 0;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		if (error_info->status == SAM_STAT_GOOD ||
			error_info->status == SAM_STAT_CHECK_CONDITION)
			rc = 0;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		rc = PQI_CMD_STATUS_ABORTED;
		break;
	}

	return rc;
}
*ctrl_info
,
4025 struct pqi_iu_header
*request
, unsigned int flags
,
4026 struct pqi_raid_error_info
*error_info
, unsigned long timeout_msecs
)
4029 struct pqi_io_request
*io_request
;
4030 unsigned long start_jiffies
;
4031 unsigned long msecs_blocked
;
4033 DECLARE_COMPLETION_ONSTACK(wait
);
4036 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
4037 * are mutually exclusive.
4040 if (flags
& PQI_SYNC_FLAGS_INTERRUPTABLE
) {
4041 if (down_interruptible(&ctrl_info
->sync_request_sem
))
4042 return -ERESTARTSYS
;
4044 if (timeout_msecs
== NO_TIMEOUT
) {
4045 down(&ctrl_info
->sync_request_sem
);
4047 start_jiffies
= jiffies
;
4048 if (down_timeout(&ctrl_info
->sync_request_sem
,
4049 msecs_to_jiffies(timeout_msecs
)))
4052 jiffies_to_msecs(jiffies
- start_jiffies
);
4053 if (msecs_blocked
>= timeout_msecs
) {
4057 timeout_msecs
-= msecs_blocked
;
4061 pqi_ctrl_busy(ctrl_info
);
4062 timeout_msecs
= pqi_wait_if_ctrl_blocked(ctrl_info
, timeout_msecs
);
4063 if (timeout_msecs
== 0) {
4064 pqi_ctrl_unbusy(ctrl_info
);
4069 if (pqi_ctrl_offline(ctrl_info
)) {
4070 pqi_ctrl_unbusy(ctrl_info
);
4075 atomic_inc(&ctrl_info
->sync_cmds_outstanding
);
4077 io_request
= pqi_alloc_io_request(ctrl_info
);
4079 put_unaligned_le16(io_request
->index
,
4080 &(((struct pqi_raid_path_request
*)request
)->request_id
));
4082 if (request
->iu_type
== PQI_REQUEST_IU_RAID_PATH_IO
)
4083 ((struct pqi_raid_path_request
*)request
)->error_index
=
4084 ((struct pqi_raid_path_request
*)request
)->request_id
;
4086 iu_length
= get_unaligned_le16(&request
->iu_length
) +
4087 PQI_REQUEST_HEADER_LENGTH
;
4088 memcpy(io_request
->iu
, request
, iu_length
);
4090 io_request
->io_complete_callback
= pqi_raid_synchronous_complete
;
4091 io_request
->context
= &wait
;
4093 pqi_start_io(ctrl_info
,
4094 &ctrl_info
->queue_groups
[PQI_DEFAULT_QUEUE_GROUP
], RAID_PATH
,
4097 pqi_ctrl_unbusy(ctrl_info
);
4099 if (timeout_msecs
== NO_TIMEOUT
) {
4100 pqi_wait_for_completion_io(ctrl_info
, &wait
);
4102 if (!wait_for_completion_io_timeout(&wait
,
4103 msecs_to_jiffies(timeout_msecs
))) {
4104 dev_warn(&ctrl_info
->pci_dev
->dev
,
4105 "command timed out\n");
4111 if (io_request
->error_info
)
4112 memcpy(error_info
, io_request
->error_info
,
4113 sizeof(*error_info
));
4115 memset(error_info
, 0, sizeof(*error_info
));
4116 } else if (rc
== 0 && io_request
->error_info
) {
4117 rc
= pqi_process_raid_io_error_synchronous(
4118 io_request
->error_info
);
4121 pqi_free_io_request(io_request
);
4123 atomic_dec(&ctrl_info
->sync_cmds_outstanding
);
4125 up(&ctrl_info
->sync_request_sem
);
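/*
 * Timeout budgeting example for the function above (illustrative): with
 * timeout_msecs = 1000, if down_timeout() sleeps 300 ms waiting for the
 * semaphore, msecs_blocked = 300 and only the remaining 700 ms are passed
 * on to pqi_wait_if_ctrl_blocked() and the completion wait, so the
 * caller's total bound is preserved across all three blocking points.
 */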
static int pqi_validate_admin_response(
	struct pqi_general_admin_response *response, u8 expected_function_code)
{
	if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
		return -EINVAL;

	if (get_unaligned_le16(&response->header.iu_length) !=
		PQI_GENERAL_ADMIN_IU_LENGTH)
		return -EINVAL;

	if (response->function_code != expected_function_code)
		return -EINVAL;

	if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
		return -EINVAL;

	return 0;
}
static int pqi_submit_admin_request_synchronous(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_general_admin_request *request,
	struct pqi_general_admin_response *response)
{
	int rc;

	pqi_submit_admin_request(ctrl_info, request);

	rc = pqi_poll_for_admin_response(ctrl_info, response);

	if (rc == 0)
		rc = pqi_validate_admin_response(response,
			request->function_code);

	return rc;
}
static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;
	struct pqi_device_capability *capability;
	struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;

	capability = kmalloc(sizeof(*capability), GFP_KERNEL);
	if (!capability)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code =
		PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
	put_unaligned_le32(sizeof(*capability),
		&request.data.report_device_capability.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor,
		capability, sizeof(*capability),
		DMA_FROM_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);

	pqi_pci_unmap(ctrl_info->pci_dev,
		&request.data.report_device_capability.sg_descriptor, 1,
		DMA_FROM_DEVICE);

	if (rc)
		goto out;

	if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
		rc = -EIO;
		goto out;
	}

	ctrl_info->max_inbound_queues =
		get_unaligned_le16(&capability->max_inbound_queues);
	ctrl_info->max_elements_per_iq =
		get_unaligned_le16(&capability->max_elements_per_iq);
	ctrl_info->max_iq_element_length =
		get_unaligned_le16(&capability->max_iq_element_length)
		* 16;
	ctrl_info->max_outbound_queues =
		get_unaligned_le16(&capability->max_outbound_queues);
	ctrl_info->max_elements_per_oq =
		get_unaligned_le16(&capability->max_elements_per_oq);
	ctrl_info->max_oq_element_length =
		get_unaligned_le16(&capability->max_oq_element_length)
		* 16;

	sop_iu_layer_descriptor =
		&capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];

	ctrl_info->max_inbound_iu_length_per_firmware =
		get_unaligned_le16(
		&sop_iu_layer_descriptor->max_inbound_iu_length);
	ctrl_info->inbound_spanning_supported =
		sop_iu_layer_descriptor->inbound_spanning_supported;
	ctrl_info->outbound_spanning_supported =
		sop_iu_layer_descriptor->outbound_spanning_supported;

out:
	kfree(capability);

	return rc;
}
static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->max_iq_element_length <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_iq_element_length,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_oq_element_length <
		PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. outbound queue element length of %d is less than the required length of %d\n",
			ctrl_info->max_oq_element_length,
			PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (ctrl_info->max_inbound_iu_length_per_firmware <
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
		dev_err(&ctrl_info->pci_dev->dev,
			"max. inbound IU length of %u is less than the min. required length of %d\n",
			ctrl_info->max_inbound_iu_length_per_firmware,
			PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
		return -EINVAL;
	}

	if (!ctrl_info->inbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller does not support inbound spanning\n");
		return -EINVAL;
	}

	if (ctrl_info->outbound_spanning_supported) {
		dev_err(&ctrl_info->pci_dev->dev,
			"the controller supports outbound spanning but this driver does not\n");
		return -EINVAL;
	}

	return 0;
}
static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_event_queue *event_queue;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	event_queue = &ctrl_info->event_queue;

	/*
	 * Create OQ (Outbound Queue - device to host queue) to dedicate
	 * to events.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(event_queue->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(event_queue->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc)
		return rc;

	event_queue->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}
static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
	unsigned int group_number)
{
	int rc;
	struct pqi_queue_group *queue_group;
	struct pqi_general_admin_request request;
	struct pqi_general_admin_response response;

	queue_group = &ctrl_info->queue_groups[group_number];

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * RAID path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[RAID_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64(
		(u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound RAID queue\n");
		return rc;
	}

	queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Create IQ (Inbound Queue - host to device queue) for
	 * Advanced I/O (AIO) path.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.create_operational_iq.queue_id);
	put_unaligned_le64((u64)queue_group->
		iq_element_array_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.element_array_addr);
	put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
		&request.data.create_operational_iq.ci_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_iq,
		&request.data.create_operational_iq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_iq.element_length);
	request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating inbound AIO queue\n");
		return rc;
	}

	queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_iq.iq_pi_offset);

	/*
	 * Designate the 2nd IQ as the AIO path.  By default, all IQs are
	 * assumed to be for RAID path I/O unless we change the queue's
	 * property.
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
	put_unaligned_le16(queue_group->iq_id[AIO_PATH],
		&request.data.change_operational_iq_properties.queue_id);
	put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
		&request.data.change_operational_iq_properties.vendor_specific);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error changing queue property\n");
		return rc;
	}

	/*
	 * Create OQ (Outbound Queue - device to host queue).
	 */
	memset(&request, 0, sizeof(request));
	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
	put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
		&request.header.iu_length);
	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
	put_unaligned_le16(queue_group->oq_id,
		&request.data.create_operational_oq.queue_id);
	put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
		&request.data.create_operational_oq.element_array_addr);
	put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
		&request.data.create_operational_oq.pi_addr);
	put_unaligned_le16(ctrl_info->num_elements_per_oq,
		&request.data.create_operational_oq.num_elements);
	put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
		&request.data.create_operational_oq.element_length);
	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
	put_unaligned_le16(queue_group->int_msg_num,
		&request.data.create_operational_oq.int_msg_num);

	rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
		&response);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating outbound queue\n");
		return rc;
	}

	queue_group->oq_ci = ctrl_info->iomem_base +
		PQI_DEVICE_REGISTERS_OFFSET +
		get_unaligned_le64(
			&response.data.create_operational_oq.oq_ci_offset);

	return 0;
}
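/*
 * Each queue group therefore consists of three operational queues that share
 * one MSI-X vector: an inbound RAID-path IQ, an inbound AIO-path IQ
 * (distinguished only by the IS_AIO_QUEUE property set above), and a single
 * outbound OQ on which the controller posts completions for both inbound
 * queues.
 */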
static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;

	rc = pqi_create_event_queue(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating event queue\n");
		return rc;
	}

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		rc = pqi_create_queue_group(ctrl_info, i);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"error creating queue group number %u/%u\n",
				i, ctrl_info->num_queue_groups);
			return rc;
		}
	}

	return 0;
}
#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH	\
	(offsetof(struct pqi_event_config, descriptors) + \
	(PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))

static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
	bool enable_events)
{
	int rc;
	unsigned int i;
	struct pqi_event_config *event_config;
	struct pqi_event_descriptor *event_descriptor;
	struct pqi_general_management_request request;

	event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		GFP_KERNEL);
	if (!event_config)
		return -ENOMEM;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		DMA_FROM_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		DMA_FROM_DEVICE);

	if (rc)
		goto out;

	for (i = 0; i < event_config->num_event_descriptors; i++) {
		event_descriptor = &event_config->descriptors[i];
		if (enable_events &&
			pqi_is_supported_event(event_descriptor->event_type))
			put_unaligned_le16(ctrl_info->event_queue.oq_id,
				&event_descriptor->oq_id);
		else
			put_unaligned_le16(0, &event_descriptor->oq_id);
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
	put_unaligned_le16(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors[1]) -
		PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
	put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		&request.data.report_event_configuration.buffer_length);

	rc = pqi_map_single(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors,
		event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
		DMA_TO_DEVICE);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev,
		request.data.report_event_configuration.sg_descriptors, 1,
		DMA_TO_DEVICE);

out:
	kfree(event_config);

	return rc;
}

static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
{
	return pqi_configure_events(ctrl_info, true);
}

static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
{
	return pqi_configure_events(ctrl_info, false);
}
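/*
 * Event configuration is a read-modify-write cycle: the current table of
 * event descriptors is read from the controller, each supported event type
 * is pointed at the dedicated event OQ (or at OQ ID 0 to disable delivery),
 * and the whole table is written back. pqi_enable_events() and
 * pqi_disable_events() differ only in which branch of that rewrite is taken.
 */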
static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct device *dev;
	size_t sg_chain_buffer_length;
	struct pqi_io_request *io_request;

	if (!ctrl_info->io_request_pool)
		return;

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		kfree(io_request->iu);
		if (!io_request->sg_chain_buffer)
			break;
		dma_free_coherent(dev, sg_chain_buffer_length,
			io_request->sg_chain_buffer,
			io_request->sg_chain_buffer_dma_handle);
		io_request++;
	}

	kfree(ctrl_info->io_request_pool);
	ctrl_info->io_request_pool = NULL;
}
static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev,
				     ctrl_info->error_buffer_length,
				     &ctrl_info->error_buffer_dma_handle,
				     GFP_KERNEL);
	if (!ctrl_info->error_buffer)
		return -ENOMEM;

	return 0;
}
static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	void *sg_chain_buffer;
	size_t sg_chain_buffer_length;
	dma_addr_t sg_chain_buffer_dma_handle;
	struct device *dev;
	struct pqi_io_request *io_request;

	ctrl_info->io_request_pool =
		kcalloc(ctrl_info->max_io_slots,
			sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);

	if (!ctrl_info->io_request_pool) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate I/O request pool\n");
		goto error;
	}

	dev = &ctrl_info->pci_dev->dev;
	sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
	io_request = ctrl_info->io_request_pool;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request->iu =
			kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);

		if (!io_request->iu) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate IU buffers\n");
			goto error;
		}

		sg_chain_buffer = dma_alloc_coherent(dev,
			sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
			GFP_KERNEL);

		if (!sg_chain_buffer) {
			dev_err(&ctrl_info->pci_dev->dev,
				"failed to allocate PQI scatter-gather chain buffers\n");
			goto error;
		}

		io_request->index = i;
		io_request->sg_chain_buffer = sg_chain_buffer;
		io_request->sg_chain_buffer_dma_handle =
			sg_chain_buffer_dma_handle;
		io_request++;
	}

	return 0;

error:
	pqi_free_all_io_requests(ctrl_info);

	return -ENOMEM;
}
/*
 * Calculate required resources that are sized based on max. outstanding
 * requests and max. transfer size.
 */
static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
{
	u32 max_transfer_size;
	u32 max_sg_entries;

	ctrl_info->scsi_ml_can_queue =
		ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
	ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;

	ctrl_info->error_buffer_length =
		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;

	if (reset_devices)
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE_KDUMP);
	else
		max_transfer_size = min(ctrl_info->max_transfer_size,
			PQI_MAX_TRANSFER_SIZE);

	max_sg_entries = max_transfer_size / PAGE_SIZE;

	/* +1 to cover when the buffer is not page-aligned. */
	max_sg_entries++;

	max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);

	max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;

	ctrl_info->sg_chain_buffer_length =
		(max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
		PQI_EXTRA_SGL_MEMORY;
	ctrl_info->sg_tablesize = max_sg_entries;
	ctrl_info->max_sectors = max_transfer_size / 512;
}
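/*
 * Worked example (illustrative numbers): with 4 KiB pages and a 1 MiB max
 * transfer size, max_sg_entries starts at 1048576 / 4096 = 256, and the +1
 * for a non-page-aligned buffer makes 257. If the controller allows that
 * many entries, the usable transfer size becomes (257 - 1) * 4096 = 1 MiB
 * and max_sectors = 1048576 / 512 = 2048.
 */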
static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
{
	int num_queue_groups;
	u16 num_elements_per_iq;
	u16 num_elements_per_oq;

	if (reset_devices) {
		num_queue_groups = 1;
	} else {
		int num_cpus;
		int max_queue_groups;

		max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
			ctrl_info->max_outbound_queues - 1);
		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);

		num_cpus = num_online_cpus();
		num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
		num_queue_groups = min(num_queue_groups, max_queue_groups);
	}

	ctrl_info->num_queue_groups = num_queue_groups;
	ctrl_info->max_hw_queue_index = num_queue_groups - 1;

	/*
	 * Make sure that the max. inbound IU length is an even multiple
	 * of our inbound element length.
	 */
	ctrl_info->max_inbound_iu_length =
		(ctrl_info->max_inbound_iu_length_per_firmware /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;

	num_elements_per_iq =
		(ctrl_info->max_inbound_iu_length /
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);

	/* Add one because one element in each queue is unusable. */
	num_elements_per_iq++;

	num_elements_per_iq = min(num_elements_per_iq,
		ctrl_info->max_elements_per_iq);

	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
	num_elements_per_oq = min(num_elements_per_oq,
		ctrl_info->max_elements_per_oq);

	ctrl_info->num_elements_per_iq = num_elements_per_iq;
	ctrl_info->num_elements_per_oq = num_elements_per_oq;

	ctrl_info->max_sg_per_iu =
		((ctrl_info->max_inbound_iu_length -
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
		sizeof(struct pqi_sg_descriptor)) +
		PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
}
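/*
 * Worked example (illustrative numbers, assuming 128-byte IQ elements): if
 * the firmware reports a max inbound IU length of 16376 bytes, it is first
 * rounded down to a multiple of the element length (16256 bytes), giving
 * 16256 / 128 = 127 elements per spanned IU, so num_elements_per_iq becomes
 * 127 + 1 = 128 and num_elements_per_oq becomes ((128 - 1) * 2) + 1 = 255,
 * both still subject to the firmware's per-queue limits.
 */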
static inline void pqi_set_sg_descriptor(
	struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
{
	u64 address = (u64)sg_dma_address(sg);
	unsigned int length = sg_dma_len(sg);

	put_unaligned_le64(address, &sg_descriptor->address);
	put_unaligned_le32(length, &sg_descriptor->length);
	put_unaligned_le32(0, &sg_descriptor->flags);
}
static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	num_sg_in_iu = 0;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);

	return 0;
}
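/*
 * SG chaining in the loop above: up to (ctrl_info->max_sg_per_iu - 1) data
 * descriptors are embedded directly in the IU. If the command needs more,
 * the next embedded slot is rewritten as a CISS_SG_CHAIN descriptor whose
 * address points at the request's pre-allocated DMA-coherent chain buffer,
 * and the remaining descriptors are written there instead. Only the final
 * descriptor, wherever it lives, is marked CISS_SG_LAST.
 */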
static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
	struct pqi_io_request *io_request)
{
	int i;
	u16 iu_length;
	int sg_count;
	bool chained;
	unsigned int num_sg_in_iu;
	unsigned int max_sg_per_iu;
	struct scatterlist *sg;
	struct pqi_sg_descriptor *sg_descriptor;

	sg_count = scsi_dma_map(scmd);
	if (sg_count < 0)
		return sg_count;

	iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	num_sg_in_iu = 0;

	if (sg_count == 0)
		goto out;

	sg = scsi_sglist(scmd);
	sg_descriptor = request->sg_descriptors;
	max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
	chained = false;
	i = 0;

	while (1) {
		pqi_set_sg_descriptor(sg_descriptor, sg);
		if (!chained)
			num_sg_in_iu++;
		i++;
		if (i == sg_count)
			break;
		sg_descriptor++;
		if (i == max_sg_per_iu) {
			put_unaligned_le64(
				(u64)io_request->sg_chain_buffer_dma_handle,
				&sg_descriptor->address);
			put_unaligned_le32((sg_count - num_sg_in_iu)
				* sizeof(*sg_descriptor),
				&sg_descriptor->length);
			put_unaligned_le32(CISS_SG_CHAIN,
				&sg_descriptor->flags);
			chained = true;
			num_sg_in_iu++;
			sg_descriptor = io_request->sg_chain_buffer;
		}
		sg = sg_next(sg);
	}

	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
	request->partial = chained;
	iu_length += num_sg_in_iu * sizeof(*sg_descriptor);

out:
	put_unaligned_le16(iu_length, &request->header.iu_length);
	request->num_sg_descriptors = num_sg_in_iu;

	return 0;
}
static void pqi_raid_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	scsi_dma_unmap(scmd);
	pqi_scsi_done(scmd);
}
static int pqi_raid_submit_scsi_cmd_with_io_request(
	struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	int rc;
	size_t cdb_length;
	struct pqi_raid_path_request *request;

	io_request->io_complete_callback = pqi_raid_io_complete;
	io_request->scmd = scmd;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));

	cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
	memcpy(request->cdb, scmd->cmnd, cdb_length);

	switch (cdb_length) {
	case 6:
	case 10:
	case 12:
	case 16:
		/* No bytes in the Additional CDB bytes field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_0;
		break;
	case 20:
		/* 4 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_4;
		break;
	case 24:
		/* 8 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_8;
		break;
	case 28:
		/* 12 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_12;
		break;
	case 32:
	default:
		/* 16 bytes in the Additional cdb field */
		request->additional_cdb_bytes_usage =
			SOP_ADDITIONAL_CDB_BYTES_16;
		break;
	}

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}

	rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);

	return 0;
}
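/*
 * Note on the direction mapping above: the SOP data-direction flags appear
 * to be named from the device's point of view, so a host write
 * (DMA_TO_DEVICE) becomes SOP_READ_FLAG (the device reads the data) and a
 * host read (DMA_FROM_DEVICE) becomes SOP_WRITE_FLAG.
 */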
static inline int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	struct pqi_io_request *io_request;

	io_request = pqi_alloc_io_request(ctrl_info);

	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
		device, scmd, queue_group);
}

static inline void pqi_schedule_bypass_retry(struct pqi_ctrl_info *ctrl_info)
{
	if (!pqi_ctrl_blocked(ctrl_info))
		schedule_work(&ctrl_info->raid_bypass_retry_work);
}
static bool pqi_raid_bypass_retry_needed(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;

	if (!io_request->raid_bypass)
		return false;

	scmd = io_request->scmd;
	if ((scmd->result & 0xff) == SAM_STAT_GOOD)
		return false;
	if (host_byte(scmd->result) == DID_NO_CONNECT)
		return false;

	device = scmd->device->hostdata;
	if (pqi_device_offline(device))
		return false;

	ctrl_info = shost_to_hba(scmd->device->host);
	if (pqi_ctrl_offline(ctrl_info))
		return false;

	return true;
}
static inline void pqi_add_to_raid_bypass_retry_list(
	struct pqi_ctrl_info *ctrl_info,
	struct pqi_io_request *io_request, bool at_head)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
	if (at_head)
		list_add(&io_request->request_list_entry,
			&ctrl_info->raid_bypass_retry_list);
	else
		list_add_tail(&io_request->request_list_entry,
			&ctrl_info->raid_bypass_retry_list);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
}

static void pqi_queued_raid_bypass_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}

static void pqi_queue_raid_bypass_retry(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_ctrl_info *ctrl_info;

	io_request->io_complete_callback = pqi_queued_raid_bypass_complete;
	scmd = io_request->scmd;
	set_host_byte(scmd, DID_OK);

	ctrl_info = shost_to_hba(scmd->device->host);

	pqi_add_to_raid_bypass_retry_list(ctrl_info, io_request, false);
	pqi_schedule_bypass_retry(ctrl_info);
}
static int pqi_retry_raid_bypass(struct pqi_io_request *io_request)
{
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_queue_group *queue_group;

	scmd = io_request->scmd;
	device = scmd->device->hostdata;
	if (pqi_device_in_reset(device)) {
		pqi_free_io_request(io_request);
		set_host_byte(scmd, DID_RESET);
		pqi_scsi_done(scmd);
		return 0;
	}

	ctrl_info = shost_to_hba(scmd->device->host);
	queue_group = io_request->queue_group;

	pqi_reinit_io_request(io_request);

	return pqi_raid_submit_scsi_cmd_with_io_request(ctrl_info, io_request,
		device, scmd, queue_group);
}

static inline struct pqi_io_request *pqi_next_queued_raid_bypass_request(
	struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_io_request *io_request;

	spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
	io_request = list_first_entry_or_null(
		&ctrl_info->raid_bypass_retry_list,
		struct pqi_io_request, request_list_entry);
	if (io_request)
		list_del(&io_request->request_list_entry);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);

	return io_request;
}
static void pqi_retry_raid_bypass_requests(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_io_request *io_request;

	pqi_ctrl_busy(ctrl_info);

	while (1) {
		if (pqi_ctrl_blocked(ctrl_info))
			break;
		io_request = pqi_next_queued_raid_bypass_request(ctrl_info);
		if (!io_request)
			break;
		rc = pqi_retry_raid_bypass(io_request);
		if (rc) {
			pqi_add_to_raid_bypass_retry_list(ctrl_info,
				io_request, true);
			pqi_schedule_bypass_retry(ctrl_info);
			break;
		}
	}

	pqi_ctrl_unbusy(ctrl_info);
}

static void pqi_raid_bypass_retry_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info,
		raid_bypass_retry_work);
	pqi_retry_raid_bypass_requests(ctrl_info);
}

static void pqi_clear_all_queued_raid_bypass_retries(
	struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->raid_bypass_retry_list_lock, flags);
	INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
	spin_unlock_irqrestore(&ctrl_info->raid_bypass_retry_list_lock, flags);
}
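/*
 * Lifecycle of a RAID-bypass retry: an AIO completion that fails the
 * pqi_raid_bypass_retry_needed() checks is parked on raid_bypass_retry_list,
 * raid_bypass_retry_work drains that list and resubmits each request down
 * the RAID path, and anything that cannot be resubmitted is pushed back to
 * the head of the list so a later worker run can try again.
 */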
static void pqi_aio_io_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct scsi_cmnd *scmd;

	scmd = io_request->scmd;
	scsi_dma_unmap(scmd);
	if (io_request->status == -EAGAIN)
		set_host_byte(scmd, DID_IMM_RETRY);
	else if (pqi_raid_bypass_retry_needed(io_request)) {
		pqi_queue_raid_bypass_retry(io_request);
		return;
	}
	pqi_free_io_request(io_request);
	pqi_scsi_done(scmd);
}
static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
	struct pqi_queue_group *queue_group)
{
	return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
		scmd->cmnd, scmd->cmd_len, queue_group, NULL, false);
}
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info, bool raid_bypass)
{
	int rc;
	struct pqi_io_request *io_request;
	struct pqi_aio_path_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_aio_io_complete;
	io_request->scmd = scmd;
	io_request->raid_bypass = raid_bypass;

	request = io_request->iu;
	memset(request, 0,
		offsetof(struct pqi_raid_path_request, sg_descriptors));

	request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
	put_unaligned_le32(aio_handle, &request->nexus_id);
	put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	put_unaligned_le16(io_request->index, &request->request_id);
	request->error_index = request->request_id;
	if (cdb_length > sizeof(request->cdb))
		cdb_length = sizeof(request->cdb);
	request->cdb_length = cdb_length;
	memcpy(request->cdb, cdb, cdb_length);

	switch (scmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		break;
	case DMA_FROM_DEVICE:
		request->data_direction = SOP_WRITE_FLAG;
		break;
	case DMA_NONE:
		request->data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case DMA_BIDIRECTIONAL:
		request->data_direction = SOP_BIDIRECTIONAL;
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev,
			"unknown data direction: %d\n",
			scmd->sc_data_direction);
		break;
	}

	if (encryption_info) {
		request->encryption_enable = true;
		put_unaligned_le16(encryption_info->data_encryption_key_index,
			&request->data_encryption_key_index);
		put_unaligned_le32(encryption_info->encrypt_tweak_lower,
			&request->encrypt_tweak_lower);
		put_unaligned_le32(encryption_info->encrypt_tweak_upper,
			&request->encrypt_tweak_upper);
	}

	rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
	if (rc) {
		pqi_free_io_request(io_request);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);

	return 0;
}
static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd)
{
	u16 hw_queue;

	hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
	if (hw_queue > ctrl_info->max_hw_queue_index)
		hw_queue = 0;

	return hw_queue;
}
/*
 * This function gets called just before we hand the completed SCSI request
 * back to the SML.
 */
void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
{
	struct pqi_scsi_dev *device;

	if (!scmd->device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		return;
	}

	device = scmd->device->hostdata;
	if (!device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		return;
	}

	atomic_dec(&device->scsi_cmds_outstanding);
}
static int pqi_scsi_queue_command(struct Scsi_Host *shost,
	struct scsi_cmnd *scmd)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;
	u16 hw_queue;
	struct pqi_queue_group *queue_group;
	bool raid_bypassed;

	device = scmd->device->hostdata;
	ctrl_info = shost_to_hba(shost);

	if (!device) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	atomic_inc(&device->scsi_cmds_outstanding);

	if (pqi_ctrl_offline(ctrl_info) || pqi_device_in_remove(device)) {
		set_host_byte(scmd, DID_NO_CONNECT);
		pqi_scsi_done(scmd);
		return 0;
	}

	pqi_ctrl_busy(ctrl_info);
	if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) ||
	    pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/*
	 * This is necessary because the SML doesn't zero out this field during
	 * error recovery.
	 */
	scmd->result = 0;

	hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
	queue_group = &ctrl_info->queue_groups[hw_queue];

	if (pqi_is_logical_device(device)) {
		raid_bypassed = false;
		if (device->raid_bypass_enabled &&
			!blk_rq_is_passthrough(scmd->request)) {
			rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
				scmd, queue_group);
			if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
				raid_bypassed = true;
				atomic_inc(&device->raid_bypass_cnt);
			}
		}
		if (!raid_bypassed)
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	} else {
		if (device->aio_enabled)
			rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
		else
			rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
				queue_group);
	}

out:
	pqi_ctrl_unbusy(ctrl_info);
	if (rc)
		atomic_dec(&device->scsi_cmds_outstanding);

	return rc;
}
static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group)
{
	unsigned int path;
	unsigned long flags;
	bool list_is_empty;

	for (path = 0; path < 2; path++) {
		while (1) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);
			list_is_empty =
				list_empty(&queue_group->request_list[path]);
			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
			if (list_is_empty)
				break;
			pqi_check_ctrl_health(ctrl_info);
			if (pqi_ctrl_offline(ctrl_info))
				return -ENXIO;
			usleep_range(1000, 2000);
		}
	}

	return 0;
}
static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	pqi_index_t iq_pi;
	pqi_index_t iq_ci;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
		if (rc)
			return rc;

		for (path = 0; path < 2; path++) {
			iq_pi = queue_group->iq_pi_copy[path];

			while (1) {
				iq_ci = readl(queue_group->iq_ci[path]);
				if (iq_ci == iq_pi)
					break;
				pqi_check_ctrl_health(ctrl_info);
				if (pqi_ctrl_offline(ctrl_info))
					return -ENXIO;
				usleep_range(1000, 2000);
			}
		}
	}

	return 0;
}
static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	unsigned long flags;
	struct pqi_io_request *io_request;
	struct pqi_io_request *next;
	struct scsi_cmnd *scmd;
	struct pqi_scsi_dev *scsi_device;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(
				&queue_group->submit_lock[path], flags);

			list_for_each_entry_safe(io_request, next,
				&queue_group->request_list[path],
				request_list_entry) {
				scmd = io_request->scmd;
				if (!scmd)
					continue;

				scsi_device = scmd->device->hostdata;
				if (scsi_device != device)
					continue;

				list_del(&io_request->request_list_entry);
				set_host_byte(scmd, DID_RESET);
				pqi_scsi_done(scmd);
			}

			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
		}
	}
}
static void pqi_fail_io_queued_for_all_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	unsigned int path;
	struct pqi_queue_group *queue_group;
	unsigned long flags;
	struct pqi_io_request *io_request;
	struct pqi_io_request *next;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		queue_group = &ctrl_info->queue_groups[i];

		for (path = 0; path < 2; path++) {
			spin_lock_irqsave(&queue_group->submit_lock[path],
						flags);

			list_for_each_entry_safe(io_request, next,
				&queue_group->request_list[path],
				request_list_entry) {

				scmd = io_request->scmd;
				if (!scmd)
					continue;

				list_del(&io_request->request_list_entry);
				set_host_byte(scmd, DID_RESET);
				pqi_scsi_done(scmd);
			}

			spin_unlock_irqrestore(
				&queue_group->submit_lock[path], flags);
		}
	}
}
static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, unsigned long timeout_secs)
{
	unsigned long timeout;

	timeout = (timeout_secs * PQI_HZ) + jiffies;

	while (atomic_read(&device->scsi_cmds_outstanding)) {
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;
		if (timeout_secs != NO_TIMEOUT) {
			if (time_after(jiffies, timeout)) {
				dev_err(&ctrl_info->pci_dev->dev,
					"timed out waiting for pending IO\n");
				return -ETIMEDOUT;
			}
		}
		usleep_range(1000, 2000);
	}

	return 0;
}
static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_secs)
{
	bool io_pending;
	unsigned long flags;
	unsigned long timeout;
	struct pqi_scsi_dev *device;

	timeout = (timeout_secs * PQI_HZ) + jiffies;
	while (1) {
		io_pending = false;

		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
		list_for_each_entry(device, &ctrl_info->scsi_device_list,
			scsi_device_list_entry) {
			if (atomic_read(&device->scsi_cmds_outstanding)) {
				io_pending = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
					flags);

		if (!io_pending)
			break;

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;

		if (timeout_secs != NO_TIMEOUT) {
			if (time_after(jiffies, timeout)) {
				dev_err(&ctrl_info->pci_dev->dev,
					"timed out waiting for pending IO\n");
				return -ETIMEDOUT;
			}
		}
		usleep_range(1000, 2000);
	}

	return 0;
}
static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->sync_cmds_outstanding)) {
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info))
			return -ENXIO;
		usleep_range(1000, 2000);
	}

	return 0;
}
static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
	void *context)
{
	struct completion *waiting = context;

	complete(waiting);
}

#define PQI_LUN_RESET_TIMEOUT_SECS		30
#define PQI_LUN_RESET_POLL_COMPLETION_SECS	10

static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct completion *wait)
{
	int rc;

	while (1) {
		if (wait_for_completion_io_timeout(wait,
			PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) {
			rc = 0;
			break;
		}

		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}
	}

	return rc;
}
static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	struct pqi_io_request *io_request;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct pqi_task_management_request *request;

	io_request = pqi_alloc_io_request(ctrl_info);
	io_request->io_complete_callback = pqi_lun_reset_complete;
	io_request->context = &wait;

	request = io_request->iu;
	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
	put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le16(io_request->index, &request->request_id);
	memcpy(request->lun_number, device->scsi3addr,
		sizeof(request->lun_number));
	request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
	if (ctrl_info->tmf_iu_timeout_supported)
		put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS,
					&request->timeout);

	pqi_start_io(ctrl_info,
		&ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
		io_request);

	rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
	if (rc == 0)
		rc = io_request->status;

	pqi_free_io_request(io_request);

	return rc;
}
/* Performs a reset at the LUN level. */

#define PQI_LUN_RESET_RETRIES			3
#define PQI_LUN_RESET_RETRY_INTERVAL_MSECS	10000
#define PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS	120

static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	unsigned int retries;
	unsigned long timeout_secs;

	for (retries = 0;;) {
		rc = pqi_lun_reset(ctrl_info, device);
		if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES)
			break;
		msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS);
	}

	timeout_secs = rc ? PQI_LUN_RESET_PENDING_IO_TIMEOUT_SECS : NO_TIMEOUT;

	rc |= pqi_device_wait_for_pending_io(ctrl_info, device, timeout_secs);

	return rc == 0 ? SUCCESS : FAILED;
}
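/*
 * Retry policy sketch: a LUN reset is attempted up to
 * 1 + PQI_LUN_RESET_RETRIES = 4 times, with 10 seconds between attempts,
 * so the reset phase alone can sleep for up to 30 seconds; after a failed
 * reset the driver still waits up to 120 seconds for outstanding I/O to
 * drain before reporting FAILED.
 */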
static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	mutex_lock(&ctrl_info->lun_reset_mutex);

	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_io_queued_for_device(ctrl_info, device);
	rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_device_reset_start(device);
	pqi_ctrl_unblock_requests(ctrl_info);

	if (rc)
		rc = FAILED;
	else
		rc = _pqi_device_reset(ctrl_info, device);

	pqi_device_reset_done(device);

	mutex_unlock(&ctrl_info->lun_reset_mutex);

	return rc;
}
static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
{
	int rc;
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_scsi_dev *device;

	shost = scmd->device->host;
	ctrl_info = shost_to_hba(shost);
	device = scmd->device->hostdata;

	dev_err(&ctrl_info->pci_dev->dev,
		"resetting scsi %d:%d:%d:%d\n",
		shost->host_no, device->bus, device->target, device->lun);

	pqi_check_ctrl_health(ctrl_info);
	if (pqi_ctrl_offline(ctrl_info) ||
		pqi_device_reset_blocked(ctrl_info)) {
		rc = FAILED;
		goto out;
	}

	pqi_wait_until_ofa_finished(ctrl_info);

	atomic_inc(&ctrl_info->sync_cmds_outstanding);
	rc = pqi_device_reset(ctrl_info, device);
	atomic_dec(&ctrl_info->sync_cmds_outstanding);

out:
	dev_err(&ctrl_info->pci_dev->dev,
		"reset of scsi %d:%d:%d:%d: %s\n",
		shost->host_no, device->bus, device->target, device->lun,
		rc == SUCCESS ? "SUCCESS" : "FAILED");

	return rc;
}
static int pqi_slave_alloc(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;
	unsigned long flags;
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_target *starget;
	struct sas_rphy *rphy;

	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
		starget = scsi_target(sdev);
		rphy = target_to_rphy(starget);
		device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
		if (device) {
			device->target = sdev_id(sdev);
			device->lun = sdev->lun;
			device->target_lun_valid = true;
		}
	} else {
		device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
			sdev_id(sdev), sdev->lun);
	}

	if (device) {
		sdev->hostdata = device;
		device->sdev = sdev;
		if (device->queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(sdev,
				device->advertised_queue_depth);
		}
		if (pqi_is_logical_device(device))
			pqi_disable_write_same(sdev);
		else
			sdev->allow_restart = 1;
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 0;
}
static int pqi_map_queues(struct Scsi_Host *shost)
{
	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);

	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
					ctrl_info->pci_dev, 0);
}

static int pqi_slave_configure(struct scsi_device *sdev)
{
	struct pqi_scsi_dev *device;

	device = sdev->hostdata;
	device->devtype = sdev->type;

	return 0;
}
static void pqi_slave_destroy(struct scsi_device *sdev)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (device) {
		sdev->hostdata = NULL;
		if (!list_empty(&device->scsi_device_list_entry))
			list_del(&device->scsi_device_list_entry);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	if (device) {
		pqi_dev_info(ctrl_info, "removed", device);
		pqi_free_device(device);
	}
}
static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	struct pci_dev *pci_dev;
	u32 subsystem_vendor;
	u32 subsystem_device;
	cciss_pci_info_struct pciinfo;

	if (!arg)
		return -EINVAL;

	pci_dev = ctrl_info->pci_dev;

	pciinfo.domain = pci_domain_nr(pci_dev->bus);
	pciinfo.bus = pci_dev->bus->number;
	pciinfo.dev_fn = pci_dev->devfn;
	subsystem_vendor = pci_dev->subsystem_vendor;
	subsystem_device = pci_dev->subsystem_device;
	pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;

	if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;

	return 0;
}
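/*
 * board_id packing, for example: a PCI subsystem device ID of 0x028f and a
 * subsystem vendor ID of 0x9005 (illustrative values) yield
 * board_id = (0x028f << 16) | 0x9005 = 0x028f9005, i.e. the device ID in
 * the upper 16 bits and the vendor ID in the lower 16 bits.
 */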
static int pqi_getdrivver_ioctl(void __user *arg)
{
	u32 version;

	if (!arg)
		return -EINVAL;

	version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
		(DRIVER_RELEASE << 16) | DRIVER_REVISION;

	if (copy_to_user(arg, &version, sizeof(version)))
		return -EFAULT;

	return 0;
}

struct ciss_error_info {
	u8	scsi_status;
	int	command_status;
	size_t	sense_data_length;
};
static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
	struct ciss_error_info *ciss_error_info)
{
	int ciss_cmd_status;
	size_t sense_data_length;

	switch (pqi_error_info->data_out_result) {
	case PQI_DATA_IN_OUT_GOOD:
		ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
		break;
	case PQI_DATA_IN_OUT_UNDERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
		break;
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
		ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
		break;
	case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_ERROR:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
	case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
	case PQI_DATA_IN_OUT_ERROR:
		ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
		break;
	case PQI_DATA_IN_OUT_HARDWARE_ERROR:
	case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
	case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
	case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
	case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
	case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
	case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
	case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
	case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
		ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
		break;
	case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
		ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
		break;
	case PQI_DATA_IN_OUT_ABORTED:
		ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
		break;
	case PQI_DATA_IN_OUT_TIMEOUT:
		ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
		break;
	default:
		ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
		break;
	}

	sense_data_length =
		get_unaligned_le16(&pqi_error_info->sense_data_length);
	if (sense_data_length == 0)
		sense_data_length =
			get_unaligned_le16(&pqi_error_info->response_data_length);
	if (sense_data_length)
		if (sense_data_length > sizeof(pqi_error_info->data))
			sense_data_length = sizeof(pqi_error_info->data);

	ciss_error_info->scsi_status = pqi_error_info->status;
	ciss_error_info->command_status = ciss_cmd_status;
	ciss_error_info->sense_data_length = sense_data_length;
}
static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
	int rc;
	char *kernel_buffer = NULL;
	u16 iu_length;
	size_t sense_data_length;
	IOCTL_Command_struct iocommand;
	struct pqi_raid_path_request request;
	struct pqi_raid_error_info pqi_error_info;
	struct ciss_error_info ciss_error_info;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;
	if (!arg)
		return -EINVAL;
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
		return -EFAULT;
	if (iocommand.buf_size < 1 &&
		iocommand.Request.Type.Direction != XFER_NONE)
		return -EINVAL;
	if (iocommand.Request.CDBLen > sizeof(request.cdb))
		return -EINVAL;
	if (iocommand.Request.Type.Type != TYPE_CMD)
		return -EINVAL;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
	case XFER_WRITE:
	case XFER_READ:
	case XFER_READ | XFER_WRITE:
		break;
	default:
		return -EINVAL;
	}

	if (iocommand.buf_size > 0) {
		kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
		if (!kernel_buffer)
			return -ENOMEM;
		if (iocommand.Request.Type.Direction & XFER_WRITE) {
			if (copy_from_user(kernel_buffer, iocommand.buf,
				iocommand.buf_size)) {
				rc = -EFAULT;
				goto out;
			}
		} else {
			memset(kernel_buffer, 0, iocommand.buf_size);
		}
	}

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
		PQI_REQUEST_HEADER_LENGTH;
	memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
		sizeof(request.lun_number));
	memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
	request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	switch (iocommand.Request.Type.Direction) {
	case XFER_NONE:
		request.data_direction = SOP_NO_DIRECTION_FLAG;
		break;
	case XFER_WRITE:
		request.data_direction = SOP_WRITE_FLAG;
		break;
	case XFER_READ:
		request.data_direction = SOP_READ_FLAG;
		break;
	case XFER_READ | XFER_WRITE:
		request.data_direction = SOP_BIDIRECTIONAL;
		break;
	}

	request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;

	if (iocommand.buf_size > 0) {
		put_unaligned_le32(iocommand.buf_size, &request.buffer_length);

		rc = pqi_map_single(ctrl_info->pci_dev,
			&request.sg_descriptors[0], kernel_buffer,
			iocommand.buf_size, DMA_BIDIRECTIONAL);
		if (rc)
			goto out;

		iu_length += sizeof(request.sg_descriptors[0]);
	}

	put_unaligned_le16(iu_length, &request.header.iu_length);

	if (ctrl_info->raid_iu_timeout_supported)
		put_unaligned_le32(iocommand.Request.Timeout, &request.timeout);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);

	if (iocommand.buf_size > 0)
		pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
			DMA_BIDIRECTIONAL);

	memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));

	if (rc == 0) {
		pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
		iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
		iocommand.error_info.CommandStatus =
			ciss_error_info.command_status;
		sense_data_length = ciss_error_info.sense_data_length;
		if (sense_data_length) {
			if (sense_data_length >
				sizeof(iocommand.error_info.SenseInfo))
				sense_data_length =
					sizeof(iocommand.error_info.SenseInfo);
			memcpy(iocommand.error_info.SenseInfo,
				pqi_error_info.data, sense_data_length);
			iocommand.error_info.SenseLen = sense_data_length;
		}
	}

	if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
		rc = -EFAULT;
		goto out;
	}

	if (rc == 0 && iocommand.buf_size > 0 &&
		(iocommand.Request.Type.Direction & XFER_READ)) {
		if (copy_to_user(iocommand.buf, kernel_buffer,
			iocommand.buf_size)) {
			rc = -EFAULT;
		}
	}

out:
	kfree(kernel_buffer);

	return rc;
}
static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd,
		     void __user *arg)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = shost_to_hba(sdev->host);

	if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info))
		return -EBUSY;

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		rc = pqi_scan_scsi_devices(ctrl_info);
		break;
	case CCISS_GETPCIINFO:
		rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
		break;
	case CCISS_GETDRIVVER:
		rc = pqi_getdrivver_ioctl(arg);
		break;
	case CCISS_PASSTHRU:
		rc = pqi_passthru_ioctl(ctrl_info, arg);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}
static ssize_t pqi_firmware_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version);
}

static ssize_t pqi_driver_version_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	return snprintf(buffer, PAGE_SIZE, "%s\n",
			DRIVER_VERSION BUILD_TIMESTAMP);
}

static ssize_t pqi_serial_number_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number);
}

static ssize_t pqi_model_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model);
}

static ssize_t pqi_vendor_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct Scsi_Host *shost;
	struct pqi_ctrl_info *ctrl_info;

	shost = class_to_shost(dev);
	ctrl_info = shost_to_hba(shost);

	return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor);
}
static ssize_t pqi_host_rescan_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);

	pqi_scan_start(shost);

	return count;
}

static ssize_t pqi_lockup_action_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	int count = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (pqi_lockup_actions[i].action == pqi_lockup_action)
			count += scnprintf(buffer + count, PAGE_SIZE - count,
				"[%s] ", pqi_lockup_actions[i].name);
		else
			count += scnprintf(buffer + count, PAGE_SIZE - count,
				"%s ", pqi_lockup_actions[i].name);
	}

	count += scnprintf(buffer + count, PAGE_SIZE - count, "\n");

	return count;
}
static ssize_t pqi_lockup_action_store(struct device *dev,
	struct device_attribute *attr, const char *buffer, size_t count)
{
	unsigned int i;
	char *action_name;
	char action_name_buffer[32];

	strlcpy(action_name_buffer, buffer, sizeof(action_name_buffer));
	action_name = strstrip(action_name_buffer);

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(action_name, pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return count;
		}
	}

	return -EINVAL;
}
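/*
 * lockup_action is a host attribute, so (assuming the usual sysfs layout
 * for SCSI hosts, with "host0" and "reboot" as illustrative values) it can
 * be exercised from user space as, for example:
 *
 *	cat /sys/class/scsi_host/host0/lockup_action
 *	echo reboot > /sys/class/scsi_host/host0/lockup_action
 *
 * The show handler brackets the currently selected action, and the store
 * handler accepts any name from the pqi_lockup_actions table.
 */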
static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL);
static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL);
static DEVICE_ATTR(model, 0444, pqi_model_show, NULL);
static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL);
static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL);
static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
static DEVICE_ATTR(lockup_action, 0644,
	pqi_lockup_action_show, pqi_lockup_action_store);

static struct device_attribute *pqi_shost_attrs[] = {
	&dev_attr_driver_version,
	&dev_attr_firmware_version,
	&dev_attr_model,
	&dev_attr_serial_number,
	&dev_attr_vendor,
	&dev_attr_rescan,
	&dev_attr_lockup_action,
	NULL
};
static ssize_t pqi_unique_id_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u8 unique_id[16];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	if (device->is_physical_device) {
		memset(unique_id, 0, 8);
		memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid));
	} else {
		memcpy(unique_id, device->volume_id, sizeof(device->volume_id));
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE,
		"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
		unique_id[0], unique_id[1], unique_id[2], unique_id[3],
		unique_id[4], unique_id[5], unique_id[6], unique_id[7],
		unique_id[8], unique_id[9], unique_id[10], unique_id[11],
		unique_id[12], unique_id[13], unique_id[14], unique_id[15]);
}
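/*
 * The unique_id attribute always prints 16 bytes as hex: for a logical
 * volume it is the 16-byte volume ID, while for a physical device the
 * first 8 bytes are zeroed and the last 8 bytes hold the WWID, so a
 * physical device shows up as 16 zero digits followed by the WWID.
 */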
static ssize_t pqi_lunid_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u8 lunid[8];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	memcpy(lunid, device->scsi3addr, sizeof(lunid));

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid);
}
#define MAX_PATHS	8

static ssize_t pqi_path_info_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	int i;
	int output_len = 0;
	u8 box;
	u8 bay;
	u8 path_map_index = 0;
	char *active;
	u8 phys_connector[2];

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	bay = device->bay;
	for (i = 0; i < MAX_PATHS; i++) {
		path_map_index = 1 << i;
		if (i == device->active_path_index)
			active = "Active";
		else if (device->path_map & path_map_index)
			active = "Inactive";
		else
			continue;

		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"[%d:%d:%d:%d] %20.20s ",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun,
					scsi_device_type(device->devtype));

		if (device->devtype == TYPE_RAID ||
			pqi_is_logical_device(device))
			goto end_buffer;

		memcpy(&phys_connector, &device->phys_connector[i],
			sizeof(phys_connector));
		if (phys_connector[0] < '0')
			phys_connector[0] = '0';
		if (phys_connector[1] < '0')
			phys_connector[1] = '0';

		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"PORT: %.2s ", phys_connector);

		box = device->box[i];
		if (box != 0 && box != 0xFF)
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"BOX: %hhu ", box);

		if ((device->devtype == TYPE_DISK ||
			device->devtype == TYPE_ZBC) &&
			pqi_expose_device(device))
			output_len += scnprintf(buf + output_len,
						PAGE_SIZE - output_len,
						"BAY: %hhu ", bay);

end_buffer:
		output_len += scnprintf(buf + output_len,
					PAGE_SIZE - output_len,
					"%s\n", active);
	}

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return output_len;
}
static ssize_t pqi_sas_address_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	u64 sas_address;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device || !pqi_is_device_with_sas_address(device)) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	sas_address = device->sas_address;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
}
static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	buffer[0] = device->raid_bypass_enabled ? '1' : '0';
	buffer[1] = '\n';
	buffer[2] = '\0';

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return 2;
}
static ssize_t pqi_raid_level_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	char *raid_level;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	if (pqi_is_logical_device(device))
		raid_level = pqi_raid_level_to_string(device->raid_level);
	else
		raid_level = "N/A";

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
}
static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
	struct device_attribute *attr, char *buffer)
{
	struct pqi_ctrl_info *ctrl_info;
	struct scsi_device *sdev;
	struct pqi_scsi_dev *device;
	unsigned long flags;
	int raid_bypass_cnt;

	sdev = to_scsi_device(dev);
	ctrl_info = shost_to_hba(sdev->host);

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	device = sdev->hostdata;
	if (!device) {
		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);
		return -ENODEV;
	}

	raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
}
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);

static struct device_attribute *pqi_sdev_attrs[] = {
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_path_info,
	&dev_attr_sas_address,
	&dev_attr_ssd_smart_path_enabled,
	&dev_attr_raid_level,
	&dev_attr_raid_bypass_cnt,
	NULL
};
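
/*
 * SCSI midlayer entry points for this driver; the sdev_attrs and
 * shost_attrs arrays wire up the sysfs attributes defined above.
 */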
static struct scsi_host_template pqi_driver_template = {
	.module = THIS_MODULE,
	.name = DRIVER_NAME_SHORT,
	.proc_name = DRIVER_NAME_SHORT,
	.queuecommand = pqi_scsi_queue_command,
	.scan_start = pqi_scan_start,
	.scan_finished = pqi_scan_finished,
	.this_id = -1,
	.eh_device_reset_handler = pqi_eh_device_reset_handler,
	.ioctl = pqi_ioctl,
	.slave_alloc = pqi_slave_alloc,
	.slave_configure = pqi_slave_configure,
	.slave_destroy = pqi_slave_destroy,
	.map_queues = pqi_map_queues,
	.sdev_attrs = pqi_sdev_attrs,
	.shost_attrs = pqi_shost_attrs,
};
static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
	if (!shost) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_host_alloc failed for controller %u\n",
			ctrl_info->ctrl_id);
		return -ENOMEM;
	}

	shost->io_port = 0;
	shost->n_io_port = 0;
	shost->this_id = -1;
	shost->max_channel = PQI_MAX_BUS;
	shost->max_cmd_len = MAX_COMMAND_SIZE;
	shost->max_lun = ~0;
	shost->max_id = ~0;
	shost->max_sectors = ctrl_info->max_sectors;
	shost->can_queue = ctrl_info->scsi_ml_can_queue;
	shost->cmd_per_lun = shost->can_queue;
	shost->sg_tablesize = ctrl_info->sg_tablesize;
	shost->transportt = pqi_sas_transport_template;
	shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
	shost->unique_id = shost->irq;
	shost->nr_hw_queues = ctrl_info->num_queue_groups;
	shost->hostdata[0] = (unsigned long)ctrl_info;

	rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"scsi_add_host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto free_host;
	}

	rc = pqi_add_sas_host(shost, ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"add SAS host failed for controller %u\n",
			ctrl_info->ctrl_id);
		goto remove_host;
	}

	ctrl_info->scsi_host = shost;

	return 0;

remove_host:
	scsi_remove_host(shost);
free_host:
	scsi_host_put(shost);

	return rc;
}
static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
{
	struct Scsi_Host *shost;

	pqi_delete_sas_host(ctrl_info);

	shost = ctrl_info->scsi_host;
	if (!shost)
		return;

	scsi_remove_host(shost);
	scsi_host_put(shost);
}
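
/*
 * Poll the PQI device reset register until the controller reports that the
 * reset has completed, honoring the controller-supplied maximum timeout.
 */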
static int pqi_wait_for_pqi_reset_completion(struct pqi_ctrl_info *ctrl_info)
{
	int rc = 0;
	struct pqi_device_registers __iomem *pqi_registers;
	unsigned long timeout;
	unsigned int timeout_msecs;
	union pqi_reset_register reset_reg;

	pqi_registers = ctrl_info->pqi_registers;
	timeout_msecs = readw(&pqi_registers->max_reset_timeout) * 100;
	timeout = msecs_to_jiffies(timeout_msecs) + jiffies;

	while (1) {
		msleep(PQI_RESET_POLL_INTERVAL_MSECS);
		reset_reg.all_bits = readl(&pqi_registers->device_reset);
		if (reset_reg.bits.reset_action == PQI_RESET_ACTION_COMPLETED)
			break;
		pqi_check_ctrl_health(ctrl_info);
		if (pqi_ctrl_offline(ctrl_info)) {
			rc = -ENXIO;
			break;
		}
		if (time_after(jiffies, timeout)) {
			rc = -ETIMEDOUT;
			break;
		}
	}

	return rc;
}
static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	union pqi_reset_register reset_reg;

	if (ctrl_info->pqi_reset_quiesce_supported) {
		rc = sis_pqi_reset_quiesce(ctrl_info);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"PQI reset failed during quiesce with error %d\n",
				rc);
			return rc;
		}
	}

	reset_reg.all_bits = 0;
	reset_reg.bits.reset_type = PQI_RESET_TYPE_HARD_RESET;
	reset_reg.bits.reset_action = PQI_RESET_ACTION_RESET;

	writel(reset_reg.all_bits, &ctrl_info->pqi_registers->device_reset);

	rc = pqi_wait_for_pqi_reset_completion(ctrl_info);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"PQI reset failed with error %d\n", rc);

	return rc;
}
static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_sense_subsystem_info *sense_info;

	sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL);
	if (!sense_info)
		return -ENOMEM;

	rc = pqi_sense_subsystem_info(ctrl_info, sense_info);
	if (rc)
		goto out;

	memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number,
		sizeof(sense_info->ctrl_serial_number));
	ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0';

out:
	kfree(sense_info);

	return rc;
}
static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_identify_controller *identify;

	identify = kmalloc(sizeof(*identify), GFP_KERNEL);
	if (!identify)
		return -ENOMEM;

	rc = pqi_identify_controller(ctrl_info, identify);
	if (rc)
		goto out;

	memcpy(ctrl_info->firmware_version, identify->firmware_version,
		sizeof(identify->firmware_version));
	ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
	snprintf(ctrl_info->firmware_version +
		strlen(ctrl_info->firmware_version),
		sizeof(ctrl_info->firmware_version),
		"-%u", get_unaligned_le16(&identify->firmware_build_number));

	memcpy(ctrl_info->model, identify->product_id,
		sizeof(identify->product_id));
	ctrl_info->model[sizeof(identify->product_id)] = '\0';

	memcpy(ctrl_info->vendor, identify->vendor_id,
		sizeof(identify->vendor_id));
	ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0';

out:
	kfree(identify);

	return rc;
}
struct pqi_config_table_section_info {
	struct pqi_ctrl_info *ctrl_info;
	void *section;
	u32 section_offset;
	void __iomem *section_iomem_addr;
};
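
/*
 * The firmware features section of the PQI configuration table holds three
 * parallel byte arrays of num_elements bytes each: the features the firmware
 * supports, the features the host requests, and the features the firmware
 * has actually enabled.  The helpers below index those arrays one bit per
 * feature.
 */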
static inline bool pqi_is_firmware_feature_supported(
	struct pqi_config_table_firmware_features *firmware_features,
	unsigned int bit_position)
{
	unsigned int byte_index;

	byte_index = bit_position / BITS_PER_BYTE;

	if (byte_index >= le16_to_cpu(firmware_features->num_elements))
		return false;

	return firmware_features->features_supported[byte_index] &
		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}
static inline bool pqi_is_firmware_feature_enabled(
	struct pqi_config_table_firmware_features *firmware_features,
	void __iomem *firmware_features_iomem_addr,
	unsigned int bit_position)
{
	unsigned int byte_index;
	u8 __iomem *features_enabled_iomem_addr;

	byte_index = (bit_position / BITS_PER_BYTE) +
		(le16_to_cpu(firmware_features->num_elements) * 2);

	features_enabled_iomem_addr = firmware_features_iomem_addr +
		offsetof(struct pqi_config_table_firmware_features,
			features_supported) + byte_index;

	return *((__force u8 *)features_enabled_iomem_addr) &
		(1 << (bit_position % BITS_PER_BYTE)) ? true : false;
}
static inline void pqi_request_firmware_feature(
	struct pqi_config_table_firmware_features *firmware_features,
	unsigned int bit_position)
{
	unsigned int byte_index;

	byte_index = (bit_position / BITS_PER_BYTE) +
		le16_to_cpu(firmware_features->num_elements);

	firmware_features->features_supported[byte_index] |=
		(1 << (bit_position % BITS_PER_BYTE));
}
static int pqi_config_table_update(struct pqi_ctrl_info *ctrl_info,
	u16 first_section, u16 last_section)
{
	struct pqi_vendor_general_request request;

	memset(&request, 0, sizeof(request));

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_CONFIG_TABLE_UPDATE,
		&request.function_code);
	put_unaligned_le16(first_section,
		&request.data.config_table_update.first_section);
	put_unaligned_le16(last_section,
		&request.data.config_table_update.last_section);

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);
}
static int pqi_enable_firmware_features(struct pqi_ctrl_info *ctrl_info,
	struct pqi_config_table_firmware_features *firmware_features,
	void __iomem *firmware_features_iomem_addr)
{
	void *features_requested;
	void __iomem *features_requested_iomem_addr;

	features_requested = firmware_features->features_supported +
		le16_to_cpu(firmware_features->num_elements);

	features_requested_iomem_addr = firmware_features_iomem_addr +
		(features_requested - (void *)firmware_features);

	memcpy_toio(features_requested_iomem_addr, features_requested,
		le16_to_cpu(firmware_features->num_elements));

	return pqi_config_table_update(ctrl_info,
		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES,
		PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES);
}
struct pqi_firmware_feature {
	char *feature_name;
	unsigned int feature_bit;
	bool supported;
	bool enabled;
	void (*feature_status)(struct pqi_ctrl_info *ctrl_info,
		struct pqi_firmware_feature *firmware_feature);
};
static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{
	if (!firmware_feature->supported) {
		dev_info(&ctrl_info->pci_dev->dev, "%s not supported by controller\n",
			firmware_feature->feature_name);
		return;
	}

	if (firmware_feature->enabled) {
		dev_info(&ctrl_info->pci_dev->dev,
			"%s enabled\n", firmware_feature->feature_name);
		return;
	}

	dev_err(&ctrl_info->pci_dev->dev, "failed to enable %s\n",
		firmware_feature->feature_name);
}
static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{
	switch (firmware_feature->feature_bit) {
	case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE:
		ctrl_info->soft_reset_handshake_supported =
			firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT:
		ctrl_info->raid_iu_timeout_supported =
			firmware_feature->enabled;
		break;
	case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT:
		ctrl_info->tmf_iu_timeout_supported =
			firmware_feature->enabled;
		break;
	}

	pqi_firmware_feature_status(ctrl_info, firmware_feature);
}
static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info,
	struct pqi_firmware_feature *firmware_feature)
{
	if (firmware_feature->feature_status)
		firmware_feature->feature_status(ctrl_info, firmware_feature);
}
static DEFINE_MUTEX(pqi_firmware_features_mutex);

static struct pqi_firmware_feature pqi_firmware_features[] = {
	{
		.feature_name = "Online Firmware Activation",
		.feature_bit = PQI_FIRMWARE_FEATURE_OFA,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "Serial Management Protocol",
		.feature_bit = PQI_FIRMWARE_FEATURE_SMP,
		.feature_status = pqi_firmware_feature_status,
	},
	{
		.feature_name = "New Soft Reset Handshake",
		.feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "RAID IU Timeout",
		.feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
	{
		.feature_name = "TMF IU Timeout",
		.feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT,
		.feature_status = pqi_ctrl_update_feature_flags,
	},
};
static void pqi_process_firmware_features(
	struct pqi_config_table_section_info *section_info)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;
	struct pqi_config_table_firmware_features *firmware_features;
	void __iomem *firmware_features_iomem_addr;
	unsigned int i;
	unsigned int num_features_supported;

	ctrl_info = section_info->ctrl_info;
	firmware_features = section_info->section;
	firmware_features_iomem_addr = section_info->section_iomem_addr;

	for (i = 0, num_features_supported = 0;
		i < ARRAY_SIZE(pqi_firmware_features); i++) {
		if (pqi_is_firmware_feature_supported(firmware_features,
			pqi_firmware_features[i].feature_bit)) {
			pqi_firmware_features[i].supported = true;
			num_features_supported++;
		} else {
			pqi_firmware_feature_update(ctrl_info,
				&pqi_firmware_features[i]);
		}
	}

	if (num_features_supported == 0)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
		if (!pqi_firmware_features[i].supported)
			continue;
		pqi_request_firmware_feature(firmware_features,
			pqi_firmware_features[i].feature_bit);
	}

	rc = pqi_enable_firmware_features(ctrl_info, firmware_features,
		firmware_features_iomem_addr);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable firmware features in PQI configuration table\n");
		for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
			if (!pqi_firmware_features[i].supported)
				continue;
			pqi_firmware_feature_update(ctrl_info,
				&pqi_firmware_features[i]);
		}
		return;
	}

	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
		if (!pqi_firmware_features[i].supported)
			continue;
		if (pqi_is_firmware_feature_enabled(firmware_features,
			firmware_features_iomem_addr,
			pqi_firmware_features[i].feature_bit)) {
			pqi_firmware_features[i].enabled = true;
		}
		pqi_firmware_feature_update(ctrl_info,
			&pqi_firmware_features[i]);
	}
}
static void pqi_init_firmware_features(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) {
		pqi_firmware_features[i].supported = false;
		pqi_firmware_features[i].enabled = false;
	}
}

static void pqi_process_firmware_features_section(
	struct pqi_config_table_section_info *section_info)
{
	mutex_lock(&pqi_firmware_features_mutex);
	pqi_init_firmware_features();
	pqi_process_firmware_features(section_info);
	mutex_unlock(&pqi_firmware_features_mutex);
}
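
/*
 * Walk the PQI configuration table and dispatch each section: firmware
 * features, the heartbeat counter, and the soft reset status register.
 */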
static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
{
	u32 table_length;
	u32 section_offset;
	void __iomem *table_iomem_addr;
	struct pqi_config_table *config_table;
	struct pqi_config_table_section_header *section;
	struct pqi_config_table_section_info section_info;

	table_length = ctrl_info->config_table_length;
	if (table_length == 0)
		return 0;

	config_table = kmalloc(table_length, GFP_KERNEL);
	if (!config_table) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate memory for PQI configuration table\n");
		return -ENOMEM;
	}

	/*
	 * Copy the config table contents from I/O memory space into the
	 * temporary buffer.
	 */
	table_iomem_addr = ctrl_info->iomem_base +
		ctrl_info->config_table_offset;
	memcpy_fromio(config_table, table_iomem_addr, table_length);

	section_info.ctrl_info = ctrl_info;
	section_offset =
		get_unaligned_le32(&config_table->first_section_offset);

	while (section_offset) {
		section = (void *)config_table + section_offset;

		section_info.section = section;
		section_info.section_offset = section_offset;
		section_info.section_iomem_addr =
			table_iomem_addr + section_offset;

		switch (get_unaligned_le16(&section->section_id)) {
		case PQI_CONFIG_TABLE_SECTION_FIRMWARE_FEATURES:
			pqi_process_firmware_features_section(&section_info);
			break;
		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
			if (pqi_disable_heartbeat)
				dev_warn(&ctrl_info->pci_dev->dev,
				"heartbeat disabled by module parameter\n");
			else
				ctrl_info->heartbeat_counter =
					table_iomem_addr +
					section_offset +
					offsetof(
					struct pqi_config_table_heartbeat,
						heartbeat_counter);
			break;
		case PQI_CONFIG_TABLE_SECTION_SOFT_RESET:
			ctrl_info->soft_reset_status =
				table_iomem_addr +
				section_offset +
				offsetof(struct pqi_config_table_soft_reset,
						soft_reset_status);
			break;
		}

		section_offset =
			get_unaligned_le16(&section->next_section_offset);
	}

	kfree(config_table);

	return 0;
}
/* Switches the controller from PQI mode back into SIS mode. */

static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
	rc = pqi_reset(ctrl_info);
	if (rc)
		return rc;
	rc = sis_reenable_sis_mode(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"re-enabling SIS mode failed with error %d\n", rc);
		return rc;
	}
	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);

	return 0;
}

/*
 * If the controller isn't already in SIS mode, this function forces it into
 * SIS mode.
 */

static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
{
	if (!sis_is_firmware_running(ctrl_info))
		return -ENXIO;

	if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
		return 0;

	if (sis_is_kernel_up(ctrl_info)) {
		pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
		return 0;
	}

	return pqi_revert_to_sis_mode(ctrl_info);
}
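
/*
 * Main controller bring-up path: transition the controller from SIS to PQI
 * mode, create the admin and operational queues, enable interrupts and
 * events, then register with the SCSI midlayer and start the initial
 * device scan.
 */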
#define PQI_POST_RESET_DELAY_B4_MSGU_READY	5000

static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (reset_devices) {
		sis_soft_reset(ctrl_info);
		msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
	} else {
		rc = pqi_force_sis_mode(ctrl_info);
		if (rc)
			return rc;
	}

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Get the controller properties.  This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	if (reset_devices) {
		if (ctrl_info->max_outstanding_requests >
			PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
			ctrl_info->max_outstanding_requests =
					PQI_MAX_OUTSTANDING_REQUESTS_KDUMP;
	} else {
		if (ctrl_info->max_outstanding_requests >
			PQI_MAX_OUTSTANDING_REQUESTS)
			ctrl_info->max_outstanding_requests =
					PQI_MAX_OUTSTANDING_REQUESTS;
	}

	pqi_calculate_io_resources(ctrl_info);

	rc = pqi_alloc_error_buffer(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate PQI error buffer\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	rc = pqi_alloc_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate admin queues\n");
		return rc;
	}

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_report_device_capability(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"obtaining device capability failed\n");
		return rc;
	}

	rc = pqi_validate_device_capability(ctrl_info);
	if (rc)
		return rc;

	pqi_calculate_queue_resources(ctrl_info);

	rc = pqi_enable_msix_interrupts(ctrl_info);
	if (rc)
		return rc;

	if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
		ctrl_info->max_msix_vectors =
			ctrl_info->num_msix_vectors_enabled;
		pqi_calculate_queue_resources(ctrl_info);
	}

	rc = pqi_alloc_io_resources(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_alloc_operational_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to allocate operational queues\n");
		return rc;
	}

	pqi_init_operational_queues(ctrl_info);

	rc = pqi_request_irqs(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);

	ctrl_info->controller_online = true;

	rc = pqi_process_config_table(ctrl_info);
	if (rc)
		return rc;

	pqi_start_heartbeat_timer(ctrl_info);

	rc = pqi_enable_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling events\n");
		return rc;
	}

	/* Register with the SCSI subsystem. */
	rc = pqi_register_scsi(ctrl_info);
	if (rc)
		return rc;

	rc = pqi_get_ctrl_product_details(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining product details\n");
		return rc;
	}

	rc = pqi_get_ctrl_serial_number(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining ctrl serial number\n");
		return rc;
	}

	rc = pqi_set_diag_rescan(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling multi-lun rescan\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	pqi_schedule_update_time_worker(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_admin_queues *admin_queues;
	struct pqi_event_queue *event_queue;

	admin_queues = &ctrl_info->admin_queues;
	admin_queues->iq_pi_copy = 0;
	admin_queues->oq_ci_copy = 0;
	writel(0, admin_queues->oq_pi);

	for (i = 0; i < ctrl_info->num_queue_groups; i++) {
		ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
		ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
		ctrl_info->queue_groups[i].oq_ci_copy = 0;

		writel(0, ctrl_info->queue_groups[i].iq_ci[RAID_PATH]);
		writel(0, ctrl_info->queue_groups[i].iq_ci[AIO_PATH]);
		writel(0, ctrl_info->queue_groups[i].oq_pi);
	}

	event_queue = &ctrl_info->event_queue;
	writel(0, event_queue->oq_pi);
	event_queue->oq_ci_copy = 0;
}
static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	rc = pqi_force_sis_mode(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Wait until the controller is ready to start accepting SIS
	 * commands.
	 */
	rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
	if (rc)
		return rc;

	/*
	 * Get the controller properties.  This allows us to determine
	 * whether or not it supports PQI mode.
	 */
	rc = sis_get_ctrl_properties(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller properties\n");
		return rc;
	}

	rc = sis_get_pqi_capabilities(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining controller capabilities\n");
		return rc;
	}

	/*
	 * If the function we are about to call succeeds, the
	 * controller will transition from legacy SIS mode
	 * into PQI mode.
	 */
	rc = sis_init_base_struct_addr(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error initializing PQI mode\n");
		return rc;
	}

	/* Wait for the controller to complete the SIS -> PQI transition. */
	rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"transition to PQI mode failed\n");
		return rc;
	}

	/* From here on, we are running in PQI mode. */
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);

	pqi_reinit_queues(ctrl_info);

	rc = pqi_create_admin_queues(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error creating admin queues\n");
		return rc;
	}

	rc = pqi_create_queues(ctrl_info);
	if (rc)
		return rc;

	pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);

	ctrl_info->controller_online = true;
	pqi_ctrl_unblock_requests(ctrl_info);

	rc = pqi_process_config_table(ctrl_info);
	if (rc)
		return rc;

	pqi_start_heartbeat_timer(ctrl_info);

	rc = pqi_enable_events(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling events\n");
		return rc;
	}

	rc = pqi_get_ctrl_product_details(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error obtaining product details\n");
		return rc;
	}

	rc = pqi_set_diag_rescan(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error enabling multi-lun rescan\n");
		return rc;
	}

	rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"error updating host wellness\n");
		return rc;
	}

	pqi_schedule_update_time_worker(ctrl_info);

	pqi_scan_scsi_devices(ctrl_info);

	return 0;
}
static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
	u16 timeout)
{
	int rc;

	rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
		PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);

	return pcibios_err_to_errno(rc);
}
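
/*
 * Low-level PCI setup: enable the device, set the DMA mask, map BAR 0,
 * raise the PCIe completion timeout, and enable bus mastering.
 */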
static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	u64 mask;

	rc = pci_enable_device(ctrl_info->pci_dev);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to enable PCI device\n");
		return rc;
	}

	if (sizeof(dma_addr_t) > 4)
		mask = DMA_BIT_MASK(64);
	else
		mask = DMA_BIT_MASK(32);

	rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
		goto disable_device;
	}

	rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to obtain PCI resources\n");
		goto disable_device;
	}

	ctrl_info->iomem_base = ioremap(pci_resource_start(
		ctrl_info->pci_dev, 0),
		sizeof(struct pqi_ctrl_registers));
	if (!ctrl_info->iomem_base) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to map memory for controller registers\n");
		rc = -ENOMEM;
		goto release_regions;
	}

#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS		0x6

	/* Increase the PCIe completion timeout. */
	rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
		PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
	if (rc) {
		dev_err(&ctrl_info->pci_dev->dev,
			"failed to set PCIe completion timeout\n");
		goto release_regions;
	}

	/* Enable bus mastering. */
	pci_set_master(ctrl_info->pci_dev);

	ctrl_info->registers = ctrl_info->iomem_base;
	ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;

	pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);

	return 0;

release_regions:
	pci_release_regions(ctrl_info->pci_dev);
disable_device:
	pci_disable_device(ctrl_info->pci_dev);

	return rc;
}
static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
{
	iounmap(ctrl_info->iomem_base);
	pci_release_regions(ctrl_info->pci_dev);
	if (pci_is_enabled(ctrl_info->pci_dev))
		pci_disable_device(ctrl_info->pci_dev);
	pci_set_drvdata(ctrl_info->pci_dev, NULL);
}
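
/*
 * Allocate and initialize the per-controller state: locks, work items,
 * timers, and the synchronous request semaphore.
 */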
static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
			GFP_KERNEL, numa_node);
	if (!ctrl_info)
		return NULL;

	mutex_init(&ctrl_info->scan_mutex);
	mutex_init(&ctrl_info->lun_reset_mutex);
	mutex_init(&ctrl_info->ofa_mutex);

	INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
	spin_lock_init(&ctrl_info->scsi_device_list_lock);

	INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
	atomic_set(&ctrl_info->num_interrupts, 0);
	atomic_set(&ctrl_info->sync_cmds_outstanding, 0);

	INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
	INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);

	timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0);
	INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker);

	sema_init(&ctrl_info->sync_request_sem,
		PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
	init_waitqueue_head(&ctrl_info->block_requests_wait);

	INIT_LIST_HEAD(&ctrl_info->raid_bypass_retry_list);
	spin_lock_init(&ctrl_info->raid_bypass_retry_list_lock);
	INIT_WORK(&ctrl_info->raid_bypass_retry_work,
		pqi_raid_bypass_retry_worker);

	ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
	ctrl_info->irq_mode = IRQ_MODE_NONE;
	ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;

	return ctrl_info;
}

static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
{
	kfree(ctrl_info);
}

static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
{
	pqi_free_irqs(ctrl_info);
	pqi_disable_msix_interrupts(ctrl_info);
}
static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
{
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	if (ctrl_info->queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->queue_memory_length,
			ctrl_info->queue_memory_base,
			ctrl_info->queue_memory_base_dma_handle);
	if (ctrl_info->admin_queue_memory_base)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->admin_queue_memory_length,
			ctrl_info->admin_queue_memory_base,
			ctrl_info->admin_queue_memory_base_dma_handle);
	pqi_free_all_io_requests(ctrl_info);
	if (ctrl_info->error_buffer)
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			ctrl_info->error_buffer_length,
			ctrl_info->error_buffer,
			ctrl_info->error_buffer_dma_handle);
	if (ctrl_info->iomem_base)
		pqi_cleanup_pci_init(ctrl_info);
	pqi_free_ctrl_info(ctrl_info);
}
static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_unregister_scsi(ctrl_info);
	if (ctrl_info->pqi_mode_enabled)
		pqi_revert_to_sis_mode(ctrl_info);
	pqi_free_ctrl_resources(ctrl_info);
}
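
/*
 * Online Firmware Activation (OFA) support.  Quiescing drains all
 * outstanding I/O and drops the controller back into SIS mode so the new
 * firmware can take over; unquiescing restores PQI mode and resumes I/O.
 */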
static void pqi_ofa_ctrl_quiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_wait_until_scan_finished(ctrl_info);
	pqi_ctrl_ofa_start(ctrl_info);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, PQI_PENDING_IO_TIMEOUT_SECS);
	pqi_fail_io_queued_for_all_devices(ctrl_info);
	pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_stop_heartbeat_timer(ctrl_info);
	ctrl_info->pqi_mode_enabled = false;
	pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
}

static void pqi_ofa_ctrl_unquiesce(struct pqi_ctrl_info *ctrl_info)
{
	pqi_ofa_free_host_buffer(ctrl_info);
	ctrl_info->pqi_mode_enabled = true;
	pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
	ctrl_info->controller_online = true;
	pqi_ctrl_unblock_requests(ctrl_info);
	pqi_start_heartbeat_timer(ctrl_info);
	pqi_schedule_update_time_worker(ctrl_info);
	pqi_clear_soft_reset_status(ctrl_info,
		PQI_SOFT_RESET_ABORT);
	pqi_scan_scsi_devices(ctrl_info);
}
static int pqi_ofa_alloc_mem(struct pqi_ctrl_info *ctrl_info,
	u32 total_size, u32 chunk_size)
{
	u32 sg_count;
	u32 size;
	int i;
	struct pqi_sg_descriptor *mem_descriptor = NULL;
	struct device *dev;
	struct pqi_ofa_memory *ofap;

	dev = &ctrl_info->pci_dev->dev;

	sg_count = (total_size + chunk_size - 1);
	sg_count /= chunk_size;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	if (sg_count*chunk_size < total_size)
		goto out;

	ctrl_info->pqi_ofa_chunk_virt_addr =
		kcalloc(sg_count, sizeof(void *), GFP_KERNEL);
	if (!ctrl_info->pqi_ofa_chunk_virt_addr)
		goto out;

	for (size = 0, i = 0; size < total_size; size += chunk_size, i++) {
		dma_addr_t dma_handle;

		ctrl_info->pqi_ofa_chunk_virt_addr[i] =
			dma_alloc_coherent(dev, chunk_size, &dma_handle,
					   GFP_KERNEL);

		if (!ctrl_info->pqi_ofa_chunk_virt_addr[i])
			break;

		mem_descriptor = &ofap->sg_descriptor[i];
		put_unaligned_le64 ((u64) dma_handle, &mem_descriptor->address);
		put_unaligned_le32 (chunk_size, &mem_descriptor->length);
	}

	if (!size || size < total_size)
		goto out_free_chunks;

	put_unaligned_le32(CISS_SG_LAST, &mem_descriptor->flags);
	put_unaligned_le16(sg_count, &ofap->num_memory_descriptors);
	put_unaligned_le32(size, &ofap->bytes_allocated);

	return 0;

out_free_chunks:
	while (--i >= 0) {
		mem_descriptor = &ofap->sg_descriptor[i];
		dma_free_coherent(dev, chunk_size,
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor->address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	put_unaligned_le32 (0, &ofap->bytes_allocated);
	return -ENOMEM;
}
static int pqi_ofa_alloc_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	u32 total_size;
	u32 min_chunk_size;
	u32 chunk_sz;

	total_size = le32_to_cpu(
			ctrl_info->pqi_ofa_mem_virt_addr->bytes_allocated);
	min_chunk_size = total_size / PQI_OFA_MAX_SG_DESCRIPTORS;

	for (chunk_sz = total_size; chunk_sz >= min_chunk_size; chunk_sz /= 2)
		if (!pqi_ofa_alloc_mem(ctrl_info, total_size, chunk_sz))
			return 0;

	return -ENOMEM;
}
static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info,
	u32 bytes_requested)
{
	struct pqi_ofa_memory *pqi_ofa_memory;
	struct device *dev;

	dev = &ctrl_info->pci_dev->dev;
	pqi_ofa_memory = dma_alloc_coherent(dev,
					    PQI_OFA_MEMORY_DESCRIPTOR_LENGTH,
					    &ctrl_info->pqi_ofa_mem_dma_handle,
					    GFP_KERNEL);

	if (!pqi_ofa_memory)
		return;

	put_unaligned_le16(PQI_OFA_VERSION, &pqi_ofa_memory->version);
	memcpy(&pqi_ofa_memory->signature, PQI_OFA_SIGNATURE,
		sizeof(pqi_ofa_memory->signature));
	pqi_ofa_memory->bytes_allocated = cpu_to_le32(bytes_requested);

	ctrl_info->pqi_ofa_mem_virt_addr = pqi_ofa_memory;

	if (pqi_ofa_alloc_host_buffer(ctrl_info) < 0) {
		dev_err(dev, "Failed to allocate host buffer of size = %u",
			bytes_requested);
	}
}
static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	struct pqi_sg_descriptor *mem_descriptor;
	struct pqi_ofa_memory *ofap;

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;
	if (!ofap)
		return;

	if (!ofap->bytes_allocated)
		goto out;

	mem_descriptor = ofap->sg_descriptor;

	for (i = 0; i < get_unaligned_le16(&ofap->num_memory_descriptors);
		i++) {
		dma_free_coherent(&ctrl_info->pci_dev->dev,
			get_unaligned_le32(&mem_descriptor[i].length),
			ctrl_info->pqi_ofa_chunk_virt_addr[i],
			get_unaligned_le64(&mem_descriptor[i].address));
	}
	kfree(ctrl_info->pqi_ofa_chunk_virt_addr);

out:
	dma_free_coherent(&ctrl_info->pci_dev->dev,
		PQI_OFA_MEMORY_DESCRIPTOR_LENGTH, ofap,
		ctrl_info->pqi_ofa_mem_dma_handle);
	ctrl_info->pqi_ofa_mem_virt_addr = NULL;
}
static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_vendor_general_request request;
	size_t size;
	struct pqi_ofa_memory *ofap;

	memset(&request, 0, sizeof(request));

	ofap = ctrl_info->pqi_ofa_mem_virt_addr;

	request.header.iu_type = PQI_REQUEST_IU_VENDOR_GENERAL;
	put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
		&request.header.iu_length);
	put_unaligned_le16(PQI_VENDOR_GENERAL_HOST_MEMORY_UPDATE,
		&request.function_code);

	if (ofap) {
		size = offsetof(struct pqi_ofa_memory, sg_descriptor) +
			get_unaligned_le16(&ofap->num_memory_descriptors) *
			sizeof(struct pqi_sg_descriptor);

		put_unaligned_le64((u64)ctrl_info->pqi_ofa_mem_dma_handle,
			&request.data.ofa_memory_allocation.buffer_address);
		put_unaligned_le32(size,
			&request.data.ofa_memory_allocation.buffer_length);
	}

	return pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);
}
static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info)
{
	msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY);
	return pqi_ctrl_init_resume(ctrl_info);
}
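
/*
 * Controller lockup handling.  The action taken (none, reboot, or panic)
 * is selected by the lockup_action module parameter.
 */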
static void pqi_perform_lockup_action(void)
{
	switch (pqi_lockup_action) {
	case PANIC:
		panic("FATAL: Smart Family Controller lockup detected");
		break;
	case REBOOT:
		emergency_restart();
		break;
	case NONE:
	default:
		break;
	}
}

static struct pqi_raid_error_info pqi_ctrl_offline_raid_error_info = {
	.data_out_result = PQI_DATA_IN_OUT_HARDWARE_ERROR,
	.status = SAM_STAT_CHECK_CONDITION,
};
static void pqi_fail_all_outstanding_requests(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;

		scmd = io_request->scmd;
		if (scmd) {
			set_host_byte(scmd, DID_NO_CONNECT);
		} else {
			io_request->status = -ENXIO;
			io_request->error_info =
				&pqi_ctrl_offline_raid_error_info;
		}

		io_request->io_complete_callback(io_request,
			io_request->context);
	}
}
static void pqi_take_ctrl_offline_deferred(struct pqi_ctrl_info *ctrl_info)
{
	pqi_perform_lockup_action();
	pqi_stop_heartbeat_timer(ctrl_info);
	pqi_free_interrupts(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_fail_all_outstanding_requests(ctrl_info);
	pqi_clear_all_queued_raid_bypass_retries(ctrl_info);
	pqi_ctrl_unblock_requests(ctrl_info);
}

static void pqi_ctrl_offline_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(work, struct pqi_ctrl_info, ctrl_offline_work);
	pqi_take_ctrl_offline_deferred(ctrl_info);
}
static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->controller_online)
		return;

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;
	pqi_ctrl_block_requests(ctrl_info);
	if (!pqi_disable_ctrl_shutdown)
		sis_shutdown_ctrl(ctrl_info);
	pci_disable_device(ctrl_info->pci_dev);
	dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
	schedule_work(&ctrl_info->ctrl_offline_work);
}
static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	char *ctrl_description;

	if (id->driver_data)
		ctrl_description = (char *)id->driver_data;
	else
		ctrl_description = "Microsemi Smart Family Controller";

	dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
}
static int pqi_pci_probe(struct pci_dev *pci_dev,
	const struct pci_device_id *id)
{
	int rc;
	int node, cp_node;
	struct pqi_ctrl_info *ctrl_info;

	pqi_print_ctrl_info(pci_dev, id);

	if (pqi_disable_device_id_wildcards &&
		id->subvendor == PCI_ANY_ID &&
		id->subdevice == PCI_ANY_ID) {
		dev_warn(&pci_dev->dev,
			"controller not probed because device ID wildcards are disabled\n");
		return -ENODEV;
	}

	if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
		dev_warn(&pci_dev->dev,
			"controller device ID matched using wildcards\n");

	node = dev_to_node(&pci_dev->dev);
	if (node == NUMA_NO_NODE) {
		cp_node = cpu_to_node(0);
		if (cp_node == NUMA_NO_NODE)
			cp_node = 0;
		set_dev_node(&pci_dev->dev, cp_node);
	}

	ctrl_info = pqi_alloc_ctrl_info(node);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"failed to allocate controller info block\n");
		return -ENOMEM;
	}

	ctrl_info->pci_dev = pci_dev;

	rc = pqi_pci_init(ctrl_info);
	if (rc)
		goto error;

	rc = pqi_ctrl_init(ctrl_info);
	if (rc)
		goto error;

	return 0;

error:
	pqi_remove_ctrl(ctrl_info);

	return rc;
}
static void pqi_pci_remove(struct pci_dev *pci_dev)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info)
		return;

	pqi_remove_ctrl(ctrl_info);
}

static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info)
{
	unsigned int i;
	struct pqi_io_request *io_request;
	struct scsi_cmnd *scmd;

	for (i = 0; i < ctrl_info->max_io_slots; i++) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_read(&io_request->refcount) == 0)
			continue;
		scmd = io_request->scmd;
		WARN_ON(scmd != NULL); /* IO command from SML */
		WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/
	}
}
static void pqi_shutdown(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);
	if (!ctrl_info) {
		dev_err(&pci_dev->dev,
			"cache could not be flushed\n");
		return;
	}

	pqi_disable_events(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_cancel_event_worker(ctrl_info);

	pqi_ctrl_shutdown_start(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);

	rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	if (rc) {
		dev_err(&pci_dev->dev,
			"wait for pending I/O failed\n");
		return;
	}

	pqi_ctrl_block_device_reset(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);

	/*
	 * Write all data in the controller's battery-backed cache to
	 * storage.
	 */
	rc = pqi_flush_cache(ctrl_info, SHUTDOWN);
	if (rc)
		dev_err(&pci_dev->dev,
			"unable to flush controller cache\n");

	pqi_ctrl_block_requests(ctrl_info);

	rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info);
	if (rc) {
		dev_err(&pci_dev->dev,
			"wait for pending sync cmds failed\n");
		return;
	}

	pqi_crash_if_pending_command(ctrl_info);
	pqi_reset(ctrl_info);
}
static void pqi_process_lockup_action_param(void)
{
	unsigned int i;

	if (!pqi_lockup_action_param)
		return;

	for (i = 0; i < ARRAY_SIZE(pqi_lockup_actions); i++) {
		if (strcmp(pqi_lockup_action_param,
			pqi_lockup_actions[i].name) == 0) {
			pqi_lockup_action = pqi_lockup_actions[i].action;
			return;
		}
	}

	pr_warn("%s: invalid lockup action setting \"%s\" - supported settings: none, reboot, panic\n",
		DRIVER_NAME_SHORT, pqi_lockup_action_param);
}

static void pqi_process_module_params(void)
{
	pqi_process_lockup_action_param();
}
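
/*
 * Power management: flush the cache and quiesce all I/O before the
 * controller loses power; on resume, reinitialize from SIS mode.
 */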
static __maybe_unused int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	pqi_disable_events(ctrl_info);
	pqi_cancel_update_time_worker(ctrl_info);
	pqi_cancel_rescan_worker(ctrl_info);
	pqi_wait_until_scan_finished(ctrl_info);
	pqi_wait_until_lun_reset_finished(ctrl_info);
	pqi_wait_until_ofa_finished(ctrl_info);
	pqi_flush_cache(ctrl_info, SUSPEND);
	pqi_ctrl_block_requests(ctrl_info);
	pqi_ctrl_wait_until_quiesced(ctrl_info);
	pqi_wait_until_inbound_queues_empty(ctrl_info);
	pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT);
	pqi_stop_heartbeat_timer(ctrl_info);

	if (state.event == PM_EVENT_FREEZE)
		return 0;

	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));

	ctrl_info->controller_online = false;
	ctrl_info->pqi_mode_enabled = false;

	return 0;
}
static __maybe_unused int pqi_resume(struct pci_dev *pci_dev)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = pci_get_drvdata(pci_dev);

	if (pci_dev->current_state != PCI_D0) {
		ctrl_info->max_hw_queue_index = 0;
		pqi_free_interrupts(ctrl_info);
		pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
		rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
			IRQF_SHARED, DRIVER_NAME_SHORT,
			&ctrl_info->queue_groups[0]);
		if (rc) {
			dev_err(&ctrl_info->pci_dev->dev,
				"irq %u init failed with error %d\n",
				pci_dev->irq, rc);
			return rc;
		}
		pqi_start_heartbeat_timer(ctrl_info);
		pqi_ctrl_unblock_requests(ctrl_info);
		return 0;
	}

	pci_set_power_state(pci_dev, PCI_D0);
	pci_restore_state(pci_dev);

	return pqi_ctrl_init_resume(ctrl_info);
}
/* Define the PCI IDs for the controllers that we support. */
static const struct pci_device_id pqi_pci_id_table[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0110) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0608) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0800) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0801) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0802) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0803) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0804) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0805) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0806) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0807) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0808) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0809) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x080a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0901) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0902) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0903) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0904) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0905) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0906) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0907) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0908) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x090a) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1200) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1201) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1202) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1280) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1281) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1282) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1300) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1301) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1302) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1303) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x1380) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADVANTECH, 0x8312) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_DELL, 0x1fe0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0600) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0601) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0602) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0603) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0609) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0650) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0651) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0652) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0653) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0654) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0655) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0700) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x0701) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1001) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1100) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_HP, 0x1101) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, /* … */) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_GIGABYTE, 0x1000) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
static struct pci_driver pqi_pci_driver = {
	.name = DRIVER_NAME_SHORT,
	.id_table = pqi_pci_id_table,
	.probe = pqi_pci_probe,
	.remove = pqi_pci_remove,
	.shutdown = pqi_shutdown,
#if defined(CONFIG_PM)
	.suspend = pqi_suspend,
	.resume = pqi_resume,
#endif
};
static int __init pqi_init(void)
{
	int rc;

	pr_info(DRIVER_NAME "\n");

	pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
	if (!pqi_sas_transport_template)
		return -ENODEV;

	pqi_process_module_params();

	rc = pci_register_driver(&pqi_pci_driver);
	if (rc)
		sas_release_transport(pqi_sas_transport_template);

	return rc;
}

static void __exit pqi_cleanup(void)
{
	pci_unregister_driver(&pqi_pci_driver);
	sas_release_transport(pqi_sas_transport_template);
}

module_init(pqi_init);
module_exit(pqi_cleanup);
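
/*
 * Compile-time checks that the on-the-wire structures match the offsets
 * and sizes defined by the PQI specification.
 */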
8593 static void __attribute__((unused
)) verify_structures(void)
8595 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers
,
8596 sis_host_to_ctrl_doorbell
) != 0x20);
8597 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers
,
8598 sis_interrupt_mask
) != 0x34);
8599 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers
,
8600 sis_ctrl_to_host_doorbell
) != 0x9c);
8601 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers
,
8602 sis_ctrl_to_host_doorbell_clear
) != 0xa0);
8603 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers
,
8604 sis_driver_scratch
) != 0xb0);
8605 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers
,
8606 sis_firmware_status
) != 0xbc);
8607 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers
,
8608 sis_mailbox
) != 0x1000);
8609 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers
,
8610 pqi_registers
) != 0x4000);
8612 BUILD_BUG_ON(offsetof(struct pqi_iu_header
,
8614 BUILD_BUG_ON(offsetof(struct pqi_iu_header
,
8616 BUILD_BUG_ON(offsetof(struct pqi_iu_header
,
8617 response_queue_id
) != 0x4);
8618 BUILD_BUG_ON(offsetof(struct pqi_iu_header
,
8620 BUILD_BUG_ON(sizeof(struct pqi_iu_header
) != 0x8);
8622 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info
,
8624 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info
,
8625 service_response
) != 0x1);
8626 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info
,
8627 data_present
) != 0x2);
8628 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info
,
8630 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info
,
8631 residual_count
) != 0x4);
8632 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info
,
8633 data_length
) != 0x8);
8634 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info
,
8636 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info
,
8638 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info
) != 0x10c);
8640 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info
,
8641 data_in_result
) != 0x0);
8642 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info
,
8643 data_out_result
) != 0x1);
8644 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info
,
8646 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info
,
8648 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info
,
8649 status_qualifier
) != 0x6);
8650 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info
,
8651 sense_data_length
) != 0x8);
8652 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info
,
8653 response_data_length
) != 0xa);
8654 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info
,
8655 data_in_transferred
) != 0xc);
8656 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info
,
8657 data_out_transferred
) != 0x10);
8658 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info
,
8660 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info
) != 0x114);
8662 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8664 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8665 function_and_status_code
) != 0x8);
8666 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8667 max_admin_iq_elements
) != 0x10);
8668 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8669 max_admin_oq_elements
) != 0x11);
8670 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8671 admin_iq_element_length
) != 0x12);
8672 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8673 admin_oq_element_length
) != 0x13);
8674 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8675 max_reset_timeout
) != 0x14);
8676 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8677 legacy_intx_status
) != 0x18);
8678 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8679 legacy_intx_mask_set
) != 0x1c);
8680 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8681 legacy_intx_mask_clear
) != 0x20);
8682 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8683 device_status
) != 0x40);
8684 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8685 admin_iq_pi_offset
) != 0x48);
8686 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8687 admin_oq_ci_offset
) != 0x50);
8688 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8689 admin_iq_element_array_addr
) != 0x58);
8690 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8691 admin_oq_element_array_addr
) != 0x60);
8692 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8693 admin_iq_ci_addr
) != 0x68);
8694 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8695 admin_oq_pi_addr
) != 0x70);
8696 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8697 admin_iq_num_elements
) != 0x78);
8698 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8699 admin_oq_num_elements
) != 0x79);
8700 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8701 admin_queue_int_msg_num
) != 0x7a);
8702 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8703 device_error
) != 0x80);
8704 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8705 error_details
) != 0x88);
8706 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8707 device_reset
) != 0x90);
8708 BUILD_BUG_ON(offsetof(struct pqi_device_registers
,
8709 power_action
) != 0x94);
8710 BUILD_BUG_ON(sizeof(struct pqi_device_registers
) != 0x100);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.buffer_length) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.report_device_capability.sg_descriptor) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.ci_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_iq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_array_addr) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.pi_addr) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.num_elements) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.element_length) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.queue_protocol) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.int_msg_num) != 40);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.coalescing_count) != 42);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.min_coalescing_time) != 44);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.create_operational_oq.max_coalescing_time) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
		data.delete_operational_queue.queue_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_iq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.create_operational_oq) != 64 - 11);
	BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request,
		data.delete_operational_queue) != 64 - 11);
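
	/* general admin response IU */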
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		function_code) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		status) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_iq.iq_pi_offset) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.status_descriptor) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
		data.create_operational_oq.oq_ci_offset) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
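
	/* RAID path request IU */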
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		error_index) != 27);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		timeout) != 60);
	BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
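
	/* AIO path request IU */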
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		header.work_area) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		nexus_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		buffer_length) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		data_encryption_key_index) != 22);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_lower) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		encrypt_tweak_upper) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		error_index) != 48);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		num_sg_descriptors) != 50);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		cdb_length) != 51);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		lun_number) != 52);
	BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
		sg_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
		PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
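
	/* I/O response IU posted to the operational outbound queues */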
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_io_response,
		error_index) != 10);
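
	/* general management request IU (event configuration) */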
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		header.response_queue_id) != 4);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.report_event_configuration.sg_descriptors) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.global_event_oq_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.buffer_length) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
		data.set_event_configuration.sg_descriptors) != 16);
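
	/* IU layer descriptor embedded in the device capability buffer */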
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_inbound_iu_length) != 6);
	BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
		max_outbound_iu_length) != 14);
	BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
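
	/* REPORT DEVICE CAPABILITY response buffer */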
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		data_length) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iq_arbitration_priority_support_bitmask) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_a) != 9);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_b) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		maximum_aw_c) != 11);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_inbound_queues) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_iq) != 18);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_iq_element_length) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_iq_element_length) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_outbound_queues) != 30);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_elements_per_oq) != 32);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		intr_coalescing_time_granularity) != 34);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		max_oq_element_length) != 36);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		min_oq_element_length) != 38);
	BUILD_BUG_ON(offsetof(struct pqi_device_capability,
		iu_layer_descriptors) != 64);
	BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
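
	/* event descriptor and event configuration buffer */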
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		event_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
		oq_id) != 2);
	BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);

	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		num_event_descriptors) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_config,
		descriptors) != 4);
	BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
		ARRAY_SIZE(pqi_supported_event_types));
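
	/* event response IU */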
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		additional_event_id) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_event_response,
		data) != 16);
	BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
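
	/* event acknowledge request IU */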
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_type) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		event_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
		additional_event_id) != 12);
	BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
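
	/* task management request IU */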
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		timeout) != 14);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		lun_number) != 16);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		protocol_specific) != 24);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		outbound_queue_id_to_manage) != 26);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		request_id_to_manage) != 28);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
		task_management_function) != 30);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
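
	/* task management response IU */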
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_type) != 0);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		header.iu_length) != 2);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		request_id) != 8);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		nexus_id) != 10);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		additional_response_info) != 12);
	BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
		response_code) != 15);
	BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
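
	/* BMIC IDENTIFY CONTROLLER buffer */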
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configured_logical_drive_count) != 0);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		configuration_signature) != 1);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_version) != 5);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		extended_logical_unit_count) != 154);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		firmware_build_number) != 190);
	BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
		controller_mode) != 292);
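
	/* BMIC IDENTIFY PHYSICAL DEVICE buffer */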
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		phys_bay_in_box) != 115);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		device_type) != 120);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		redundant_path_present_map) != 1736);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		active_path_number) != 1738);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_connector) != 1739);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		alternate_paths_phys_box_on_port) != 1755);
	BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
		current_queue_depth_limit) != 1796);
	BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
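
	/* compile-time sanity limits on queue element counts and lengths */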
	BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
	BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
	BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
		PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);

	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
	BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >=
		PQI_MAX_OUTSTANDING_REQUESTS_KDUMP);