/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2014-2015 PMC-Sierra, Inc.
 *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to storagedev@pmcs.com
 */
#include <scsi/scsicam.h>
struct access_method {
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	bool (*intr_pending)(struct ctlr_info *h);
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
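
/*
 * Each controller variant plugs its own entry points into this table;
 * the rest of the driver dispatches through it rather than calling the
 * SA5_* routines below directly.  Illustrative only:
 *
 *	h->access.submit_command(h, c);
 *	h->access.set_intr_mask(h, HPSA_INTR_ON);
 */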
/* for SAS hosts and SAS expanders */
struct hpsa_sas_node {
	struct device *parent_dev;
	struct list_head port_list_head;
};
struct hpsa_sas_port {
	struct list_head port_list_entry;
	struct sas_port *port;
	struct list_head phy_list_head;
	struct hpsa_sas_node *parent_node;
	struct sas_rphy *rphy;
};
struct hpsa_sas_phy {
	struct list_head phy_list_entry;
	struct hpsa_sas_port *parent_port;
};
struct hpsa_scsi_dev_t {
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];	/* from inquiry pg. 0x83 */
	unsigned char vendor[8];	/* bytes 8-15 of inquiry data */
	unsigned char model[16];	/* bytes 16-31 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t reset_cmds_out;	/* Count of commands to be affected */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices:
					 * counts commands sent to the
					 * physical device via the "ioaccel"
					 * path.
					 */
	u16 phys_connector[8];
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;
	int hba_ioaccel_enabled;
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive.
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make up those logical drives.  Note, multiple logical drives may
	 * share physical drives.  You can have for instance 5 physical
	 * drives with 3 logical drives each using those same 5 physical
	 * disks.  We need these pointers for counting I/Os out to physical
	 * devices in order to honor physical device queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	struct hpsa_sas_port *sas_port;
	int external;	/* 1 - from external array, 0 - not, <0 - unknown */
};
struct reply_queue_buffer {
	u64 *head;
	size_t size;
	u8 wraparound;
	u32 current_entry;
	dma_addr_t busaddr;
};
struct bmic_controller_parameters {
	u8 enable_command_list_verification;
	u8 backed_out_write_drives;
	u16 stripes_for_parity;
	u8 parity_distribution_mode_flags;
	u16 max_driver_requests;
	u16 elevator_trend_count;
	u8 force_scan_complete;
	u8 scsi_transfer_mode;
	u8 host_sdb_asic_fix;
	u8 pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8 snapshot_priority;
	u8 post_prompt_timeout;
	u8 automatic_drive_slamming;
	u8 cache_nvram_flags;
	u8 drive_config_flags;
	u8 temp_warning_level;
	u8 temp_shutdown_level;
	u8 temp_condition_reset;
	u8 max_coalesce_commands;
	u32 max_coalesce_delay;
};
struct ctlr_info {
	struct pci_dev *pdev;
	void __iomem *vaddr;
	int	nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;
	int	interrupts_enabled;
	int	max_commands;
	atomic_t commands_outstanding;
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int intr[MAX_REPLY_QUEUES];
	unsigned int msix_vector;
	unsigned int msi_vector;
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;

	/* queue and queue Info */
	u8 max_cmd_sg_entries;
	struct SGDescriptor **cmd_sg_list;
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list;

	/* pointers to command and error info pool */
	struct CommandList	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long		*cmd_pool_bits;
	spinlock_t		scan_lock;
	wait_queue_head_t	scan_wait_queue;

	struct Scsi_Host *scsi_host;
	spinlock_t devlock; /* to protect hba[ctlr]->dev[]; */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS	(10)
	atomic_t passthru_cmds_avail;

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;
	u64 last_intr_timestamp;
	u64 last_heartbeat_timestamp;
	u32 heartbeat_sample_interval;
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;
	struct delayed_work monitor_ctlr_work;
	struct delayed_work rescan_ctlr_work;
	int remove_in_progress;
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];	/* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED	(1 << 0)
#define HPSATMF_PHYS_LUN_RESET	(1 << 1)
#define HPSATMF_PHYS_NEX_RESET	(1 << 2)
#define HPSATMF_PHYS_TASK_ABORT	(1 << 3)
#define HPSATMF_PHYS_TSET_ABORT	(1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA	(1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET	(1 << 6)
#define HPSATMF_PHYS_QRY_TASK	(1 << 7)
#define HPSATMF_PHYS_QRY_TSET	(1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC	(1 << 9)
#define HPSATMF_IOACCEL_ENABLED	(1 << 15)
#define HPSATMF_MASK_SUPPORTED	(1 << 16)
#define HPSATMF_LOG_LUN_RESET	(1 << 17)
#define HPSATMF_LOG_NEX_RESET	(1 << 18)
#define HPSATMF_LOG_TASK_ABORT	(1 << 19)
#define HPSATMF_LOG_TSET_ABORT	(1 << 20)
#define HPSATMF_LOG_CLEAR_ACA	(1 << 21)
#define HPSATMF_LOG_CLEAR_TSET	(1 << 22)
#define HPSATMF_LOG_QRY_TASK	(1 << 23)
#define HPSATMF_LOG_QRY_TSET	(1 << 24)
#define HPSATMF_LOG_QRY_ASYNC	(1 << 25)
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1U << 31)
#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
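	/*
	 * Illustrative only: a rescan worker checks the event word it
	 * read from the controller against this mask, e.g.
	 * "if (event & RESCAN_REQUIRED_EVENT_BITS) ...", and queues a
	 * controller rescan (rescan_ctlr_work above) if any bit is set.
	 */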
	spinlock_t offline_device_lock;
	struct list_head offline_device_list;
	int	acciopath_status;
	int	raid_offload_debug;
	int	discovery_polling;
	struct	ReportLUNdata *lastlogicals;
	int	needs_abort_tags_swizzled;
	struct workqueue_struct *resubmit_wq;
	struct workqueue_struct *rescan_ctlr_wq;
	atomic_t abort_cmds_available;
	wait_queue_head_t abort_cmd_wait_queue;
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;
	u8 reset_in_progress;
	struct hpsa_sas_node *sas_host;
};
struct offline_device_entry {
	unsigned char scsi3addr[8];
	struct list_head offline_list;
};
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_TARGET 0x03
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)
/* Maximum time in seconds driver will wait for command completions
 * when polling before giving up.
 */
#define HPSA_MAX_POLL_TIME_SECS (20)
/* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
 * how many times to retry TEST UNIT READY on a device
 * while waiting for it to become ready before giving up.
 * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
 * between sending TURs while waiting for a device
 * to become ready.
 */
#define HPSA_TUR_RETRY_LIMIT (20)
#define HPSA_MAX_WAIT_INTERVAL_SECS (30)
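
/*
 * A sketch of the retry policy the two macros above describe, with a
 * doubling backoff capped at HPSA_MAX_WAIT_INTERVAL_SECS.  The helper
 * hpsa_send_test_unit_ready() is hypothetical, standing in for the
 * driver's real TUR submission path.
 */
extern int hpsa_send_test_unit_ready(struct ctlr_info *h,
	unsigned char *scsi3addr);	/* hypothetical helper */

static inline int hpsa_wait_for_device_ready(struct ctlr_info *h,
	unsigned char *scsi3addr)
{
	int count, waittime = 1;	/* seconds */

	for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
		msleep(waittime * 1000);
		if (hpsa_send_test_unit_ready(h, scsi3addr) == 0)
			return 0;	/* device is ready */
		/* back off between attempts, up to the cap */
		waittime = min(waittime * 2, HPSA_MAX_WAIT_INTERVAL_SECS);
	}
	return -ENODEV;
}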
/* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
 * to become ready, in seconds, before giving up on it.
 * HPSA_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
 * between polling the board to see if it is ready, in
 * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
 * HPSA_BOARD_READY_ITERATIONS are derived from those.
 */
#define HPSA_BOARD_READY_WAIT_SECS (120)
#define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
#define HPSA_BOARD_READY_POLL_INTERVAL \
	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
#define HPSA_BOARD_READY_ITERATIONS \
	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
#define HPSA_BOARD_NOT_READY_ITERATIONS \
	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
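
/*
 * Worked example: with the values above, HPSA_BOARD_READY_ITERATIONS
 * is (120 * 1000) / 100 = 1200 polls, i.e. one poll every 100 ms for
 * up to 120 seconds; HPSA_BOARD_NOT_READY_ITERATIONS is
 * (100 * 1000) / 100 = 1000 polls.
 */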
#define HPSA_POST_RESET_PAUSE_MSECS (3000)
#define HPSA_POST_RESET_NOOP_RETRIES (12)
/* Defining the different access_methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0
#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02
/* Performant mode flags */
#define SA5_PERF_INTR_PENDING	0x04
#define SA5_PERF_INTR_OFF	0x05
#define SA5_OUTDB_STATUS_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR_PERF_BIT	0x01
#define SA5_OUTDB_CLEAR		0xA0
#define SA5_OUTDB_STATUS	0x9C
#define HPSA_INTR_ON	1
#define HPSA_INTR_OFF	0
/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4
#define HPSA_PHYSICAL_DEVICE_BUS	0
#define HPSA_RAID_VOLUME_BUS		1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
#define HPSA_HBA_BUS			3
/*
 * Send the command to the hardware.
 */
static void SA5_submit_command(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
	/* Read back to flush the posted PCI write to the controller. */
	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
}
static void SA5_submit_command_no_read(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}
static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
	struct CommandList *c)
{
	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
}
/*
 * This card is the opposite of the other cards.
 *  0 turns interrupts on...
 *  0x08 turns them off...
 */
static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* Turn interrupts on */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else { /* Turn them off */
		h->interrupts_enabled = 0;
		writel(SA5_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}
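
/*
 * A minimal usage sketch (illustrative, not the driver's actual code):
 * callers reach these routines through h->access and pass HPSA_INTR_ON
 * or HPSA_INTR_OFF, defined above.
 */
static inline void hpsa_set_interrupts_example(struct ctlr_info *h, bool on)
{
	h->access.set_intr_mask(h, on ? HPSA_INTR_ON : HPSA_INTR_OFF);
}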
static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
{
	if (val) { /* turn on interrupts */
		h->interrupts_enabled = 1;
		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	} else {
		h->interrupts_enabled = 0;
		writel(SA5_PERF_INTR_OFF,
			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	}
}
static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
{
	struct reply_queue_buffer *rq = &h->reply_queue[q];
	unsigned long register_value = FIFO_EMPTY;

	/* msi auto clears the interrupt pending bit. */
	if (unlikely(!(h->msi_vector || h->msix_vector))) {
		/* flush the controller write of the reply queue by reading
		 * outbound doorbell status register.
		 */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
		/* Do a read in order to flush the write to the controller. */
		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
	}

	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
		register_value = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		register_value = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		/* the producer's parity bit flips on each pass */
		rq->wraparound ^= 1;
	}
	return register_value;
}
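
/*
 * A sketch (illustrative only) of how an interrupt handler drains one
 * reply queue: keep pulling completions until the routine reports
 * FIFO_EMPTY.  Tag decoding and per-command completion are elided.
 */
static inline void hpsa_drain_reply_queue_example(struct ctlr_info *h, u8 q)
{
	unsigned long raw_tag;

	while ((raw_tag = h->access.command_completed(h, q)) != FIFO_EMPTY) {
		/* decode raw_tag to a CommandList and complete it here */
	}
}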
/*
 * Returns value read from hardware.
 * Returns FIFO_EMPTY if there is nothing to read.
 */
static unsigned long SA5_completed(struct ctlr_info *h,
	__attribute__((unused)) u8 q)
{
	unsigned long register_value
		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);

	if (register_value != FIFO_EMPTY)
		atomic_dec(&h->commands_outstanding);

	if (register_value != FIFO_EMPTY)
		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
			register_value);
	else
		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");

	return register_value;
}
/*
 * Returns true if an interrupt is pending.
 */
static bool SA5_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value =
		readl(h->vaddr + SA5_INTR_STATUS);
	return register_value & SA5_INTR_PENDING;
}
static bool SA5_performant_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	if (!register_value)
		return false;

	/* Read outbound doorbell to flush */
	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
}
#define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT	0x100
static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
{
	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);

	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
		true : false;
}
#define IOACCEL_MODE1_REPLY_QUEUE_INDEX	0x1A0
#define IOACCEL_MODE1_PRODUCER_INDEX	0x1B8
#define IOACCEL_MODE1_CONSUMER_INDEX	0x1BC
#define IOACCEL_MODE1_REPLY_UNUSED	0xFFFFFFFFFFFFFFFFULL
static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
{
	u64 register_value;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	BUG_ON(q >= h->nreply_queues);

	register_value = rq->head[rq->current_entry];
	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
		if (++rq->current_entry == rq->size)
			rq->current_entry = 0;
		/*
		 * Don't really need to write the new index after each command,
		 * but with current driver design this is easiest.
		 */
		wmb();	/* order the head[] update before the index write */
		writel((q << 24) | rq->current_entry, h->vaddr +
				IOACCEL_MODE1_CONSUMER_INDEX);
		atomic_dec(&h->commands_outstanding);
	}
	return (unsigned long) register_value;
}
static struct access_method SA5_access = {
	SA5_submit_command,
	SA5_intr_mask,
	SA5_intr_pending,
	SA5_completed,
};
static struct access_method SA5_ioaccel_mode1_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_ioaccel_mode1_intr_pending,
	SA5_ioaccel_mode1_completed,
};
static struct access_method SA5_ioaccel_mode2_access = {
	SA5_submit_command_ioaccel2,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};
static struct access_method SA5_performant_access = {
	SA5_submit_command,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};
static struct access_method SA5_performant_access_no_read = {
	SA5_submit_command_no_read,
	SA5_performant_intr_mask,
	SA5_performant_intr_pending,
	SA5_performant_completed,
};
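
/*
 * Illustrative only: during initialization the driver copies whichever
 * table matches the controller's negotiated transfer method into
 * h->access, e.g. "h->access = SA5_performant_access;", and everything
 * else dispatches through that.
 */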
struct board_type {
	u32	board_id;
	char	*product_name;
	struct access_method *access;
};