/* Copyright 2012 STEC, Inc.
 *
 * This file is licensed under the terms of the 3-clause
 * BSD License (http://opensource.org/licenses/BSD-3-Clause)
 * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
 * at your option. Both licenses are also available in the LICENSE file
 * distributed with this project. This file may not be copied, modified,
 * or distributed except in accordance with those terms.
 *
 *	Gordoni Waidhofer <gwaidhofer@stec-inc.com>
 *		Initial Driver Design!
 *	Thomas Swann <tswann@stec-inc.com>
 *		Interrupt handling.
 *	Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
 *		biomode implementation.
 *	Akhil Bhansali <abhansali@stec-inc.com>
 *		Added support for DISCARD / FLUSH and FUA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/ctype.h>
#include <linux/wait.h>
#include <linux/uio.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"
static int skd_dbg_level;
static int skd_isr_comp_limit = 4;
enum {
        STEC_LINK_2_5GTS = 0,
        STEC_LINK_5GTS = 1,
        STEC_LINK_8GTS = 2,
        STEC_LINK_UNKNOWN = 0xFF
};

enum {
        SKD_FLUSH_INITIALIZER,
        SKD_FLUSH_ZERO_SIZE_FIRST,
        SKD_FLUSH_DATA_SECOND,
};
#define SKD_ASSERT(expr) \
        do { \
                if (unlikely(!(expr))) { \
                        pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
                               # expr, __FILE__, __func__, __LINE__); \
                } \
        } while (0)
#define DRV_NAME "skd"
#define DRV_VERSION "2.2.1"
#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
#define DRV_BIN_VERSION 0x100
#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
MODULE_AUTHOR("bug-reports: support@stec-inc.com");
MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
#define PCI_VENDOR_ID_STEC      0x1B39
#define PCI_DEVICE_ID_S1120     0x0001

#define SKD_FUA_NV              (1 << 1)
#define SKD_MINORS_PER_DEVICE   16

#define SKD_MAX_QUEUE_DEPTH     200u

#define SKD_PAUSE_TIMEOUT       (5 * 1000)

#define SKD_N_FITMSG_BYTES      (512u)

#define SKD_N_SPECIAL_CONTEXT   32u
#define SKD_N_SPECIAL_FITMSG_BYTES      (128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
#define SKD_N_SG_PER_SPECIAL    256u

#define SKD_N_COMPLETION_ENTRY  256u
#define SKD_N_READ_CAP_BYTES    (8u)

#define SKD_N_INTERNAL_BYTES    (512u)

/* 5 bits of uniqifier, 0xF800 */
#define SKD_ID_INCR             (0x400)
#define SKD_ID_TABLE_MASK       (3u << 8u)
#define  SKD_ID_RW_REQUEST      (0u << 8u)
#define  SKD_ID_INTERNAL        (1u << 8u)
#define  SKD_ID_SPECIAL_REQUEST (2u << 8u)
#define  SKD_ID_FIT_MSG         (3u << 8u)
#define SKD_ID_SLOT_MASK        0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
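/*
 * How the id encoding plays out (illustration, not from the original
 * source): the low 10 bits (SKD_ID_SLOT_AND_TABLE_MASK) pack a table
 * selector in bits 9:8 and a slot index in bits 7:0, while the bits
 * above act as a generation counter that is bumped by SKD_ID_INCR each
 * time a context is reused, so a stale completion carrying an old id no
 * longer matches the live context.  For example, id 0x0A03 decodes as
 * table 2 (SKD_ID_SPECIAL_REQUEST), slot 3, generation 2
 * (0x0800 / SKD_ID_INCR).
 */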
#define SKD_N_TIMEOUT_SLOT      4u
#define SKD_TIMEOUT_SLOT_MASK   3u
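/*
 * Timeout bookkeeping (summary of how these are used below):
 * skdev->timeout_stamp advances once per timer tick, and each issued
 * request is counted in the slot selected by
 * (timeout_stamp & SKD_TIMEOUT_SLOT_MASK).  A slot that is still
 * non-zero when its stamp value comes around again has held a request
 * for a full cycle of all four slots, which is how skd_timer_tick()
 * detects overdue I/O.
 */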
#define SKD_N_MAX_SECTORS       2048u

#define SKD_MAX_RETRIES         2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36
enum skd_drvr_state {
        SKD_DRVR_STATE_LOAD,
        SKD_DRVR_STATE_IDLE,
        SKD_DRVR_STATE_BUSY,
        SKD_DRVR_STATE_STARTING,
        SKD_DRVR_STATE_ONLINE,
        SKD_DRVR_STATE_PAUSING,
        SKD_DRVR_STATE_PAUSED,
        SKD_DRVR_STATE_DRAINING_TIMEOUT,
        SKD_DRVR_STATE_RESTARTING,
        SKD_DRVR_STATE_RESUMING,
        SKD_DRVR_STATE_STOPPING,
        SKD_DRVR_STATE_FAULT,
        SKD_DRVR_STATE_DISAPPEARED,
        SKD_DRVR_STATE_PROTOCOL_MISMATCH,
        SKD_DRVR_STATE_BUSY_ERASE,
        SKD_DRVR_STATE_BUSY_SANITIZE,
        SKD_DRVR_STATE_BUSY_IMMINENT,
        SKD_DRVR_STATE_WAIT_BOOT,
        SKD_DRVR_STATE_SYNCING,
};
#define SKD_WAIT_BOOT_TIMO      SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO       SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO     SKD_TIMER_MINUTES(4u)
#define SKD_DRAINING_TIMO       SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO           SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO   SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS  90u
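/*
 * These countdowns are consumed by skd_timer_tick(), which re-arms
 * itself with mod_timer(..., jiffies + HZ), i.e. one tick per second.
 * That is why SKD_TIMER_SECONDS() is the identity and
 * SKD_TIMER_MINUTES() simply multiplies by 60.
 */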
enum skd_req_state {
        SKD_REQ_STATE_IDLE,
        SKD_REQ_STATE_SETUP,
        SKD_REQ_STATE_BUSY,
        SKD_REQ_STATE_COMPLETED,
        SKD_REQ_STATE_TIMEOUT,
        SKD_REQ_STATE_ABORTED,
};

enum skd_fit_msg_state {
        SKD_MSG_STATE_IDLE,
        SKD_MSG_STATE_BUSY,
};
enum skd_check_status_action {
        SKD_CHECK_STATUS_REPORT_GOOD,
        SKD_CHECK_STATUS_REPORT_SMART_ALERT,
        SKD_CHECK_STATUS_REQUEUE_REQUEST,
        SKD_CHECK_STATUS_REPORT_ERROR,
        SKD_CHECK_STATUS_BUSY_IMMINENT,
};
struct skd_fitmsg_context {
        enum skd_fit_msg_state state;

        struct skd_fitmsg_context *next;

        u32 id;
        u16 outstanding;

        u32 length;
        u32 offset;

        u8 *msg_buf;
        dma_addr_t mb_dma_address;
};
struct skd_request_context {
        enum skd_req_state state;

        struct skd_request_context *next;

        u16 id;
        u32 fitmsg_id;

        struct request *req;
        u8 flush_cmd;

        u32 timeout_stamp;
        u8 sg_data_dir;
        struct scatterlist *sg;
        u32 n_sg;
        u32 sg_byte_count;

        struct fit_sg_descriptor *sksg_list;
        dma_addr_t sksg_dma_address;

        struct fit_completion_entry_v1 completion;

        struct fit_comp_error_info err_info;
};
#define SKD_DATA_DIR_HOST_TO_CARD       1
#define SKD_DATA_DIR_CARD_TO_HOST       2

struct skd_special_context {
        struct skd_request_context req;

        u8 orphaned;

        void *data_buf;
        dma_addr_t db_dma_address;

        u8 *msg_buf;
        dma_addr_t mb_dma_address;
};

struct skd_sg_io {
        fmode_t mode;
        void __user *argp;

        struct sg_io_hdr sg;

        u8 cdb[16];

        u32 dxfer_len;
        u32 iovcnt;
        struct sg_iovec *iov;
        struct sg_iovec no_iov_iov;

        struct skd_special_context *skspcl;
};
typedef enum skd_irq_type {
        SKD_IRQ_LEGACY,
        SKD_IRQ_MSI,
        SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS 2
struct skd_device {
        volatile void __iomem *mem_map[SKD_MAX_BARS];
        resource_size_t mem_phys[SKD_MAX_BARS];
        u32 mem_size[SKD_MAX_BARS];

        struct skd_msix_entry *msix_entries;

        struct pci_dev *pdev;
        int pcie_error_reporting_is_enabled;

        spinlock_t lock;
        struct gendisk *disk;
        struct request_queue *queue;
        struct device *class_dev;
        int gendisk_on;
        int sync_done;

        atomic_t device_count;
        char name[32];

        enum skd_drvr_state state;
        u32 drive_state;

        u32 in_flight;
        u32 cur_max_queue_depth;
        u32 queue_low_water_mark;
        u32 dev_max_queue_depth;

        u32 num_fitmsg_context;

        u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
        u32 timeout_stamp;
        u32 timo_slot;
        struct skd_fitmsg_context *skmsg_free_list;
        struct skd_fitmsg_context *skmsg_table;

        struct skd_request_context *skreq_free_list;
        struct skd_request_context *skreq_table;

        struct skd_special_context *skspcl_free_list;
        struct skd_special_context *skspcl_table;

        struct skd_special_context internal_skspcl;
        u32 read_cap_blocksize;
        u32 read_cap_last_lba;
        int read_cap_is_valid;
        int inquiry_is_valid;
        u8 inq_serial_num[13]; /* 12 chars plus null term */
        u8 id_str[80]; /* holds a composite name (pci + sernum) */

        struct fit_completion_entry_v1 *skcomp_table;
        struct fit_comp_error_info *skerr_table;
        dma_addr_t cq_dma_address;

        wait_queue_head_t waitq;

        struct timer_list timer;
        u32 timer_countdown;

        int sgs_per_request;

        int dbg_level;
        u32 connect_time_stamp;
        int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16

        struct work_struct completion_worker;
};
#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF)      skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
        u32 val;

        if (likely(skdev->dbg_level < 2))
                return readl(skdev->mem_map[1] + offset);
        else {
                barrier();
                val = readl(skdev->mem_map[1] + offset);
                barrier();
                pr_debug("%s:%s:%d offset %x = %x\n",
                         skdev->name, __func__, __LINE__, offset, val);
                return val;
        }
}
static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
                                   u32 offset)
{
        if (likely(skdev->dbg_level < 2)) {
                writel(val, skdev->mem_map[1] + offset);
                barrier();
        } else {
                barrier();
                writel(val, skdev->mem_map[1] + offset);
                barrier();
                pr_debug("%s:%s:%d offset %x = %x\n",
                         skdev->name, __func__, __LINE__, offset, val);
        }
}
static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
                                   u32 offset)
{
        if (likely(skdev->dbg_level < 2)) {
                writeq(val, skdev->mem_map[1] + offset);
                barrier();
        } else {
                barrier();
                writeq(val, skdev->mem_map[1] + offset);
                barrier();
                pr_debug("%s:%s:%d offset %x = %016llx\n",
                         skdev->name, __func__, __LINE__, offset, val);
        }
}
#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
static int skd_isr_type = SKD_IRQ_DEFAULT;

module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
                 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
                 "Maximum SCSI requests packed in a single message."
                 " (1-14, default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
                 "Maximum SCSI requests issued to s1120."
                 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
                 "Maximum SG elements per block request."
                 " (1-4096, default==256)");

static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
                 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
/* Major device number dynamically assigned. */
static u32 skd_major;

static void skd_destruct(struct skd_device *skdev);
static const struct block_device_operations skd_blockdev_ops;
static void skd_send_fitmsg(struct skd_device *skdev,
                            struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
                                    struct skd_special_context *skspcl);
static void skd_request_fn(struct request_queue *rq);
static void skd_end_request(struct skd_device *skdev,
                            struct skd_request_context *skreq, int error);
static int skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq);

static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
static void skd_release_special(struct skd_device *skdev,
                                struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
static void skd_recover_requests(struct skd_device *skdev, int requeue);
static void skd_soft_reset(struct skd_device *skdev);

static const char *skd_name(struct skd_device *skdev);
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
static void skd_log_skmsg(struct skd_device *skdev,
                          struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
                          struct skd_request_context *skreq, const char *event);
/*
 *****************************************************************************
 * READ/WRITE REQUESTS
 *****************************************************************************
 */
static void skd_fail_all_pending(struct skd_device *skdev)
{
        struct request_queue *q = skdev->queue;
        struct request *req;

        for (;; ) {
                req = blk_peek_request(q);
                if (req == NULL)
                        break;
                blk_start_request(req);
                __blk_end_request_all(req, -EIO);
        }
}
static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
                int data_dir, unsigned lba,
                unsigned count)
{
        if (data_dir == READ)
                scsi_req->cdb[0] = 0x28;
        else
                scsi_req->cdb[0] = 0x2a;

        scsi_req->cdb[1] = 0;
        scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
        scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
        scsi_req->cdb[4] = (lba & 0xff00) >> 8;
        scsi_req->cdb[5] = (lba & 0xff);
        scsi_req->cdb[6] = 0;
        scsi_req->cdb[7] = (count & 0xff00) >> 8;
        scsi_req->cdb[8] = count & 0xff;
        scsi_req->cdb[9] = 0;
}
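/*
 * Worked example (values illustrative, not from the original source):
 * for lba = 0x00123456 and count = 8 sectors on a read, the assignments
 * above produce the READ(10) CDB 28 00 00 12 34 56 00 00 08 00 --
 * opcode, flags, 32-bit big-endian LBA, group, 16-bit big-endian
 * transfer length, control.
 */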
static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
                            struct skd_request_context *skreq)
{
        skreq->flush_cmd = 1;

        scsi_req->cdb[0] = 0x35;
        scsi_req->cdb[1] = 0;
        scsi_req->cdb[2] = 0;
        scsi_req->cdb[3] = 0;
        scsi_req->cdb[4] = 0;
        scsi_req->cdb[5] = 0;
        scsi_req->cdb[6] = 0;
        scsi_req->cdb[7] = 0;
        scsi_req->cdb[8] = 0;
        scsi_req->cdb[9] = 0;
}
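/*
 * Note: opcode 0x35 is SYNCHRONIZE CACHE (10).  With the LBA and block
 * count fields left zero it asks the device to flush its entire write
 * cache, which is how the zero-size first phase of a FLUSH request is
 * issued.
 */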
static void skd_request_fn_not_online(struct request_queue *q);

static void skd_request_fn(struct request_queue *q)
{
        struct skd_device *skdev = q->queuedata;
        struct skd_fitmsg_context *skmsg = NULL;
        struct fit_msg_hdr *fmh = NULL;
        struct skd_request_context *skreq;
        struct request *req = NULL;
        struct skd_scsi_request *scsi_req;
        unsigned long io_flags;
        int error;
        u32 lba;
        u32 count;
        int data_dir;
        u32 be_lba;
        u32 be_count;
        u64 be_dmaa;
        u64 cmdctxt;
        u32 timo_slot;
        u8 *cmd_ptr;
        int flush, fua;

        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
                skd_request_fn_not_online(q);
                return;
        }

        if (blk_queue_stopped(skdev->queue)) {
                if (skdev->skmsg_free_list == NULL ||
                    skdev->skreq_free_list == NULL ||
                    skdev->in_flight >= skdev->queue_low_water_mark)
                        /* There is still some kind of shortage */
                        return;

                queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
        }

        /*
         * Stop conditions:
         *  - There are no more native requests
         *  - There are already the maximum number of requests in progress
         *  - There are no more skd_request_context entries
         *  - There are no more FIT msg buffers
         */
        for (;; ) {
                flush = fua = 0;

                req = blk_peek_request(q);

                /* Are there any native requests to start? */
                if (req == NULL)
                        break;

                lba = (u32)blk_rq_pos(req);
                count = blk_rq_sectors(req);
                data_dir = rq_data_dir(req);
                io_flags = req->cmd_flags;

                if (req_op(req) == REQ_OP_FLUSH)
                        flush++;

                if (io_flags & REQ_FUA)
                        fua++;

                pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
                         "count=%u(0x%x) dir=%d\n",
                         skdev->name, __func__, __LINE__,
                         req, lba, lba, count, count, data_dir);

                /* At this point we know there is a request */

                /* Are too many requests already in progress? */
                if (skdev->in_flight >= skdev->cur_max_queue_depth) {
                        pr_debug("%s:%s:%d qdepth %d, limit %d\n",
                                 skdev->name, __func__, __LINE__,
                                 skdev->in_flight, skdev->cur_max_queue_depth);
                        break;
                }

                /* Is a skd_request_context available? */
                skreq = skdev->skreq_free_list;
                if (skreq == NULL) {
                        pr_debug("%s:%s:%d Out of req=%p\n",
                                 skdev->name, __func__, __LINE__, q);
                        break;
                }
                SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
                SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);

                /* Now we check to see if we can get a fit msg */
                if (skmsg == NULL) {
                        if (skdev->skmsg_free_list == NULL) {
                                pr_debug("%s:%s:%d Out of msg\n",
                                         skdev->name, __func__, __LINE__);
                                break;
                        }
                }

                skreq->flush_cmd = 0;
                skreq->n_sg = 0;
                skreq->sg_byte_count = 0;

                /*
                 * OK to now dequeue request from q.
                 *
                 * At this point we are committed to either start or reject
                 * the native request. Note that skd_request_context is
                 * available but is still at the head of the free list.
                 */
                blk_start_request(req);

                skreq->req = req;
                skreq->fitmsg_id = 0;

                /* Either a FIT msg is in progress or we have to start one. */
                if (skmsg == NULL) {
                        /* Are there any FIT msg buffers available? */
                        skmsg = skdev->skmsg_free_list;
                        if (skmsg == NULL) {
                                pr_debug("%s:%s:%d Out of msg skdev=%p\n",
                                         skdev->name, __func__, __LINE__,
                                         skdev);
                                break;
                        }
                        SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
                        SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);

                        skdev->skmsg_free_list = skmsg->next;

                        skmsg->state = SKD_MSG_STATE_BUSY;
                        skmsg->id += SKD_ID_INCR;

                        /* Initialize the FIT msg header */
                        fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
                        memset(fmh, 0, sizeof(*fmh));
                        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
                        skmsg->length = sizeof(*fmh);
                }

                skreq->fitmsg_id = skmsg->id;

                /*
                 * Note that a FIT msg may have just been started
                 * but contains no SoFIT requests yet.
                 */

                /*
                 * Transcode the request, checking as we go. The outcome of
                 * the transcoding is represented by the error variable.
                 */
                cmd_ptr = &skmsg->msg_buf[skmsg->length];
                memset(cmd_ptr, 0, 32);

                be_lba = cpu_to_be32(lba);
                be_count = cpu_to_be32(count);
                be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
                cmdctxt = skreq->id + SKD_ID_INCR;

                scsi_req = (struct skd_scsi_request *)cmd_ptr;
                scsi_req->hdr.tag = cmdctxt;
                scsi_req->hdr.sg_list_dma_address = be_dmaa;

                if (data_dir == READ)
                        skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
                else
                        skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;

                if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
                        skd_prep_zerosize_flush_cdb(scsi_req, skreq);
                        SKD_ASSERT(skreq->flush_cmd == 1);
                } else {
                        skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
                }

                if (fua)
                        scsi_req->cdb[1] |= SKD_FUA_NV;

                if (!req->bio)
                        goto skip_sg;

                error = skd_preop_sg_list(skdev, skreq);

                if (error != 0) {
                        /*
                         * Complete the native request with error.
                         * Note that the request context is still at the
                         * head of the free list, and that the SoFIT request
                         * was encoded into the FIT msg buffer but the FIT
                         * msg length has not been updated. In short, the
                         * only resource that has been allocated but might
                         * not be used is that the FIT msg could be empty.
                         */
                        pr_debug("%s:%s:%d error Out\n",
                                 skdev->name, __func__, __LINE__);
                        skd_end_request(skdev, skreq, error);
                        continue;
                }

skip_sg:
                scsi_req->hdr.sg_list_len_bytes =
                        cpu_to_be32(skreq->sg_byte_count);

                /* Complete resource allocations. */
                skdev->skreq_free_list = skreq->next;
                skreq->state = SKD_REQ_STATE_BUSY;
                skreq->id += SKD_ID_INCR;

                skmsg->length += sizeof(struct skd_scsi_request);
                fmh->num_protocol_cmds_coalesced++;

                /*
                 * Update the active request counts.
                 * Capture the timeout timestamp.
                 */
                skreq->timeout_stamp = skdev->timeout_stamp;
                timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
                skdev->timeout_slot[timo_slot]++;
                skdev->in_flight++;
                pr_debug("%s:%s:%d req=0x%x busy=%d\n",
                         skdev->name, __func__, __LINE__,
                         skreq->id, skdev->in_flight);

                /*
                 * If the FIT msg buffer is full send it.
                 */
                if (skmsg->length >= SKD_N_FITMSG_BYTES ||
                    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
                        skd_send_fitmsg(skdev, skmsg);
                        skmsg = NULL;
                        fmh = NULL;
                }
        }

        /*
         * Is a FIT msg in progress? If it is empty put the buffer back
         * on the free list. If it is non-empty send what we got.
         * This minimizes latency when there are fewer requests than
         * what fits in a FIT msg.
         */
        if (skmsg != NULL) {
                /* Bigger than just a FIT msg header? */
                if (skmsg->length > sizeof(struct fit_msg_hdr)) {
                        pr_debug("%s:%s:%d sending msg=%p, len %d\n",
                                 skdev->name, __func__, __LINE__,
                                 skmsg, skmsg->length);
                        skd_send_fitmsg(skdev, skmsg);
                } else {
                        /*
                         * The FIT msg is empty. It means we got started
                         * on the msg, but the requests were rejected.
                         */
                        skmsg->state = SKD_MSG_STATE_IDLE;
                        skmsg->id += SKD_ID_INCR;
                        skmsg->next = skdev->skmsg_free_list;
                        skdev->skmsg_free_list = skmsg;
                }
                skmsg = NULL;
                fmh = NULL;
        }

        /*
         * If req is non-NULL it means there is something to do but
         * we are out of a resource.
         */
        if (req)
                blk_stop_queue(skdev->queue);
}
static void skd_end_request(struct skd_device *skdev,
                            struct skd_request_context *skreq, int error)
{
        if (unlikely(error)) {
                struct request *req = skreq->req;
                char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
                u32 lba = (u32)blk_rq_pos(req);
                u32 count = blk_rq_sectors(req);

                pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
                       skd_name(skdev), cmd, lba, count, skreq->id);
        } else
                pr_debug("%s:%s:%d id=0x%x error=%d\n",
                         skdev->name, __func__, __LINE__, skreq->id, error);

        __blk_end_request_all(skreq->req, error);
}
static int skd_preop_sg_list(struct skd_device *skdev,
                             struct skd_request_context *skreq)
{
        struct request *req = skreq->req;
        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
        struct scatterlist *sg = &skreq->sg[0];
        int n_sg;
        int i;

        skreq->sg_byte_count = 0;

        /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
                      skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */

        n_sg = blk_rq_map_sg(skdev->queue, req, sg);
        if (n_sg <= 0)
                return -EINVAL;

        /*
         * Map scatterlist to PCI bus addresses.
         * Note PCI might change the number of entries.
         */
        n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
        if (n_sg <= 0)
                return -EINVAL;

        SKD_ASSERT(n_sg <= skdev->sgs_per_request);

        skreq->n_sg = n_sg;

        for (i = 0; i < n_sg; i++) {
                struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
                u32 cnt = sg_dma_len(&sg[i]);
                uint64_t dma_addr = sg_dma_address(&sg[i]);

                sgd->control = FIT_SGD_CONTROL_NOT_LAST;
                sgd->byte_count = cnt;
                skreq->sg_byte_count += cnt;
                sgd->host_side_addr = dma_addr;
                sgd->dev_side_addr = 0;
        }

        skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
        skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

        if (unlikely(skdev->dbg_level > 1)) {
                pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
                         skdev->name, __func__, __LINE__,
                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
                for (i = 0; i < n_sg; i++) {
                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
                        pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
                                 "addr=0x%llx next=0x%llx\n",
                                 skdev->name, __func__, __LINE__,
                                 i, sgd->byte_count, sgd->control,
                                 sgd->host_side_addr, sgd->next_desc_ptr);
                }
        }

        return 0;
}
static void skd_postop_sg_list(struct skd_device *skdev,
                               struct skd_request_context *skreq)
{
        int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
        int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;

        /*
         * restore the next ptr for next IO request so we
         * don't have to set it every time.
         */
        skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
                skreq->sksg_dma_address +
                ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
        pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
}
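/*
 * The sksg_list descriptors are pre-chained (descriptor i points at
 * i + 1, as skd_skreq_prep_buffering() below illustrates), so
 * skd_preop_sg_list() only has to clear next_desc_ptr on the final
 * descriptor and mark it FIT_SGD_CONTROL_LAST; skd_postop_sg_list()
 * then restores the chain link it broke so the list is ready for the
 * next request.
 */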
static void skd_request_fn_not_online(struct request_queue *q)
{
        struct skd_device *skdev = q->queuedata;

        SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

        skd_log_skdev(skdev, "req_not_online");
        switch (skdev->state) {
        case SKD_DRVR_STATE_PAUSING:
        case SKD_DRVR_STATE_PAUSED:
        case SKD_DRVR_STATE_STARTING:
        case SKD_DRVR_STATE_RESTARTING:
        case SKD_DRVR_STATE_WAIT_BOOT:
        /* In case of starting, we haven't started the queue,
         * so we can't get here... but requests are
         * possibly hanging out waiting for us because we
         * reported the dev/skd0 already. They'll wait
         * forever if connect doesn't complete.
         * What to do??? delay dev/skd0 ??
         */
        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
        case SKD_DRVR_STATE_BUSY_ERASE:
        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
                return;

        case SKD_DRVR_STATE_BUSY_SANITIZE:
        case SKD_DRVR_STATE_STOPPING:
        case SKD_DRVR_STATE_SYNCING:
        case SKD_DRVR_STATE_FAULT:
        case SKD_DRVR_STATE_DISAPPEARED:
        default:
                break;
        }

        /* If we get here, terminate all pending block requests
         * with EIO and any scsi pass thru with appropriate sense
         */
        skd_fail_all_pending(skdev);
}
/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */
static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_timer_tick(ulong arg)
{
        struct skd_device *skdev = (struct skd_device *)arg;

        u32 timo_slot;
        u32 overdue_timestamp;
        unsigned long reqflags;
        u32 state;

        if (skdev->state == SKD_DRVR_STATE_FAULT)
                /* The driver has declared fault, and we want it to
                 * stay that way until driver is reloaded.
                 */
                return;

        spin_lock_irqsave(&skdev->lock, reqflags);

        state = SKD_READL(skdev, FIT_STATUS);
        state &= FIT_SR_DRIVE_STATE_MASK;
        if (state != skdev->drive_state)
                skd_isr_fwstate(skdev);

        if (skdev->state != SKD_DRVR_STATE_ONLINE) {
                skd_timer_tick_not_online(skdev);
                goto timer_func_out;
        }
        skdev->timeout_stamp++;
        timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;

        /*
         * All requests that happened during the previous use of
         * this slot should be done by now. The previous use was
         * over 7 seconds ago.
         */
        if (skdev->timeout_slot[timo_slot] == 0)
                goto timer_func_out;

        /* Something is overdue */
        overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;

        pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
                 skdev->name, __func__, __LINE__,
                 skdev->timeout_slot[timo_slot], skdev->in_flight);
        pr_err("(%s): Overdue IOs (%d), busy %d\n",
               skd_name(skdev), skdev->timeout_slot[timo_slot],
               skdev->in_flight);

        skdev->timer_countdown = SKD_DRAINING_TIMO;
        skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
        skdev->timo_slot = timo_slot;
        blk_stop_queue(skdev->queue);

timer_func_out:
        mod_timer(&skdev->timer, (jiffies + HZ));

        spin_unlock_irqrestore(&skdev->lock, reqflags);
}
static void skd_timer_tick_not_online(struct skd_device *skdev)
{
        switch (skdev->state) {
        case SKD_DRVR_STATE_IDLE:
        case SKD_DRVR_STATE_LOAD:
                break;
        case SKD_DRVR_STATE_BUSY_SANITIZE:
                pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
                         skdev->name, __func__, __LINE__,
                         skdev->drive_state, skdev->state);
                /* If we've been in sanitize for 3 seconds, we figure we're not
                 * going to get anymore completions, so recover requests now
                 */
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                skd_recover_requests(skdev, 0);
                break;

        case SKD_DRVR_STATE_BUSY:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
        case SKD_DRVR_STATE_BUSY_ERASE:
                pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
                         skdev->name, __func__, __LINE__,
                         skdev->state, skdev->timer_countdown);
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
                         skdev->name, __func__, __LINE__,
                         skdev->state, skdev->timer_countdown);
                skd_restart_device(skdev);
                break;

        case SKD_DRVR_STATE_WAIT_BOOT:
        case SKD_DRVR_STATE_STARTING:
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                /* For now, we fault the drive. Could attempt resets to
                 * recover at some point. */
                skdev->state = SKD_DRVR_STATE_FAULT;

                pr_err("(%s): DriveFault Connect Timeout (%x)\n",
                       skd_name(skdev), skdev->drive_state);

                /* start the queue so we can respond with error to requests */
                /* wakeup anyone waiting for startup complete */
                blk_start_queue(skdev->queue);
                skdev->gendisk_on = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        case SKD_DRVR_STATE_ONLINE:
                /* shouldn't get here. */
                break;

        case SKD_DRVR_STATE_PAUSING:
        case SKD_DRVR_STATE_PAUSED:
                break;

        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
                pr_debug("%s:%s:%d "
                         "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
                         skdev->name, __func__, __LINE__,
                         skdev->timo_slot,
                         skdev->timer_countdown,
                         skdev->in_flight,
                         skdev->timeout_slot[skdev->timo_slot]);
                /* if the slot has cleared we can let the I/O continue */
                if (skdev->timeout_slot[skdev->timo_slot] == 0) {
                        pr_debug("%s:%s:%d Slot drained, starting queue.\n",
                                 skdev->name, __func__, __LINE__);
                        skdev->state = SKD_DRVR_STATE_ONLINE;
                        blk_start_queue(skdev->queue);
                        return;
                }
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                skd_restart_device(skdev);
                break;

        case SKD_DRVR_STATE_RESTARTING:
                if (skdev->timer_countdown > 0) {
                        skdev->timer_countdown--;
                        return;
                }
                /* For now, we fault the drive. Could attempt resets to
                 * recover at some point. */
                skdev->state = SKD_DRVR_STATE_FAULT;
                pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
                       skd_name(skdev), skdev->drive_state);

                /*
                 * Recovering does two things:
                 * 1. completes IO with error
                 * 2. reclaims dma resources
                 * When is it safe to recover requests?
                 * - if the drive state is faulted
                 * - if the state is still soft reset after our timeout
                 * - if the drive registers are dead (state = FF)
                 * If it is "unsafe", we still need to recover, so we will
                 * disable pci bus mastering and disable our interrupts.
                 */

                if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
                    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
                    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
                        /* It never came out of soft reset. Try to
                         * recover the requests and then let them
                         * fail. This is to mitigate hung processes. */
                        skd_recover_requests(skdev, 0);
                else {
                        pr_err("(%s): Disable BusMaster (%x)\n",
                               skd_name(skdev), skdev->drive_state);
                        pci_disable_device(skdev->pdev);
                        skd_disable_interrupts(skdev);
                        skd_recover_requests(skdev, 0);
                }

                /* start the queue so we can respond with error to requests */
                /* wakeup anyone waiting for startup complete */
                blk_start_queue(skdev->queue);
                skdev->gendisk_on = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        case SKD_DRVR_STATE_RESUMING:
        case SKD_DRVR_STATE_STOPPING:
        case SKD_DRVR_STATE_SYNCING:
        case SKD_DRVR_STATE_FAULT:
        case SKD_DRVR_STATE_DISAPPEARED:
        default:
                break;
        }
}
static int skd_start_timer(struct skd_device *skdev)
{
        int rc;

        init_timer(&skdev->timer);
        setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

        rc = mod_timer(&skdev->timer, (jiffies + HZ));
        if (rc)
                pr_err("%s: failed to start timer %d\n",
                       __func__, rc);
        return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
        del_timer_sync(&skdev->timer);
}
/*
 *****************************************************************************
 * IOCTL
 *****************************************************************************
 */
static int skd_ioctl_sg_io(struct skd_device *skdev,
                           fmode_t mode, void __user *argp);
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
                                        struct skd_sg_io *sksgio);
static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
                                   struct skd_sg_io *sksgio);
static int skd_sg_io_prep_buffering(struct skd_device *skdev,
                                    struct skd_sg_io *sksgio);
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
                                 struct skd_sg_io *sksgio, int dxfer_dir);
static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
                                 struct skd_sg_io *sksgio);
static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
static int skd_sg_io_release_skspcl(struct skd_device *skdev,
                                    struct skd_sg_io *sksgio);
static int skd_sg_io_put_status(struct skd_device *skdev,
                                struct skd_sg_io *sksgio);

static void skd_complete_special(struct skd_device *skdev,
                                 volatile struct fit_completion_entry_v1
                                 *skcomp,
                                 volatile struct fit_comp_error_info *skerr,
                                 struct skd_special_context *skspcl);
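/*
 * SG_IO pass-thru pipeline, in the order skd_ioctl_sg_io() runs the
 * helpers declared above: validate the user's sg_io_hdr and iovecs,
 * grab a special context, build the bounce buffers, copy data in (for
 * writes), send the FIT message, wait for completion, copy data out
 * (for reads), write status back to user space, and release the
 * context.
 */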
static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
                          uint cmd_in, ulong arg)
{
        int rc = 0;
        struct gendisk *disk = bdev->bd_disk;
        struct skd_device *skdev = disk->private_data;
        void __user *p = (void *)arg;

        pr_debug("%s:%s:%d %s: CMD[%s] ioctl  mode 0x%x, cmd 0x%x arg %0lx\n",
                 skdev->name, __func__, __LINE__,
                 disk->disk_name, current->comm, mode, cmd_in, arg);

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd_in) {
        case SG_SET_TIMEOUT:
        case SG_GET_TIMEOUT:
        case SG_GET_VERSION_NUM:
                rc = scsi_cmd_ioctl(disk->queue, disk, mode, cmd_in, p);
                break;
        case SG_IO:
                rc = skd_ioctl_sg_io(skdev, mode, p);
                break;

        default:
                rc = -ENOTTY;
                break;
        }

        pr_debug("%s:%s:%d %s:  completion rc %d\n",
                 skdev->name, __func__, __LINE__, disk->disk_name, rc);
        return rc;
}
static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
                           void __user *argp)
{
        int rc;
        struct skd_sg_io sksgio;

        memset(&sksgio, 0, sizeof(sksgio));
        sksgio.mode = mode;
        sksgio.argp = argp;
        sksgio.iov = &sksgio.no_iov_iov;

        switch (skdev->state) {
        case SKD_DRVR_STATE_ONLINE:
        case SKD_DRVR_STATE_BUSY_IMMINENT:
                break;

        default:
                pr_debug("%s:%s:%d drive not online\n",
                         skdev->name, __func__, __LINE__);
                rc = -ENXIO;
                goto out;
        }

        rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
        if (rc)
                goto out;

        rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
        if (rc)
                goto out;

        rc = skd_sg_io_prep_buffering(skdev, &sksgio);
        if (rc)
                goto out;

        rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
        if (rc)
                goto out;

        rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
        if (rc)
                goto out;

        rc = skd_sg_io_await(skdev, &sksgio);
        if (rc)
                goto out;

        rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
        if (rc)
                goto out;

        rc = skd_sg_io_put_status(skdev, &sksgio);

out:
        skd_sg_io_release_skspcl(skdev, &sksgio);

        if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
                kfree(sksgio.iov);
        return rc;
}
static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
                                        struct skd_sg_io *sksgio)
{
        struct sg_io_hdr *sgp = &sksgio->sg;
        int i, acc;

        if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
                pr_debug("%s:%s:%d access sg failed %p\n",
                         skdev->name, __func__, __LINE__, sksgio->argp);
                return -EFAULT;
        }

        if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
                pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
                         skdev->name, __func__, __LINE__, sksgio->argp);
                return -EFAULT;
        }

        if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
                pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
                         skdev->name, __func__, __LINE__, sgp->interface_id);
                return -EINVAL;
        }

        if (sgp->cmd_len > sizeof(sksgio->cdb)) {
                pr_debug("%s:%s:%d cmd_len invalid %d\n",
                         skdev->name, __func__, __LINE__, sgp->cmd_len);
                return -EINVAL;
        }

        if (sgp->iovec_count > 256) {
                pr_debug("%s:%s:%d iovec_count invalid %d\n",
                         skdev->name, __func__, __LINE__, sgp->iovec_count);
                return -EINVAL;
        }

        if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
                pr_debug("%s:%s:%d dxfer_len invalid %d\n",
                         skdev->name, __func__, __LINE__, sgp->dxfer_len);
                return -EINVAL;
        }

        switch (sgp->dxfer_direction) {
        case SG_DXFER_NONE:
                acc = -1;
                break;

        case SG_DXFER_TO_DEV:
                acc = VERIFY_READ;
                break;

        case SG_DXFER_FROM_DEV:
        case SG_DXFER_TO_FROM_DEV:
                acc = VERIFY_WRITE;
                break;

        default:
                pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
                         skdev->name, __func__, __LINE__,
                         sgp->dxfer_direction);
                return -EINVAL;
        }

        if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
                pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
                         skdev->name, __func__, __LINE__, sgp->cmdp);
                return -EFAULT;
        }

        if (sgp->mx_sb_len != 0) {
                if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
                        pr_debug("%s:%s:%d access sbp failed %p\n",
                                 skdev->name, __func__, __LINE__, sgp->sbp);
                        return -EFAULT;
                }
        }

        if (sgp->iovec_count == 0) {
                sksgio->iov[0].iov_base = sgp->dxferp;
                sksgio->iov[0].iov_len = sgp->dxfer_len;
                sksgio->iovcnt = 1;
                sksgio->dxfer_len = sgp->dxfer_len;
        } else {
                struct sg_iovec *iov;
                uint nbytes = sizeof(*iov) * sgp->iovec_count;
                size_t iov_data_len;

                iov = kmalloc(nbytes, GFP_KERNEL);
                if (iov == NULL) {
                        pr_debug("%s:%s:%d alloc iovec failed %d\n",
                                 skdev->name, __func__, __LINE__,
                                 sgp->iovec_count);
                        return -ENOMEM;
                }
                sksgio->iov = iov;
                sksgio->iovcnt = sgp->iovec_count;

                if (copy_from_user(iov, sgp->dxferp, nbytes)) {
                        pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
                                 skdev->name, __func__, __LINE__, sgp->dxferp);
                        return -EFAULT;
                }

                /*
                 * Sum up the vecs, making sure they don't overflow
                 */
                iov_data_len = 0;
                for (i = 0; i < sgp->iovec_count; i++) {
                        if (iov_data_len + iov[i].iov_len < iov_data_len)
                                return -EINVAL;
                        iov_data_len += iov[i].iov_len;
                }

                /* SG_IO howto says that the shorter of the two wins */
                if (sgp->dxfer_len < iov_data_len) {
                        sksgio->iovcnt = iov_shorten((struct iovec *)iov,
                                                     sgp->iovec_count,
                                                     sgp->dxfer_len);
                        sksgio->dxfer_len = sgp->dxfer_len;
                } else
                        sksgio->dxfer_len = iov_data_len;
        }

        if (sgp->dxfer_direction != SG_DXFER_NONE) {
                struct sg_iovec *iov = sksgio->iov;
                for (i = 0; i < sksgio->iovcnt; i++, iov++) {
                        if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
                                pr_debug("%s:%s:%d access data failed %p/%d\n",
                                         skdev->name, __func__, __LINE__,
                                         iov->iov_base, (int)iov->iov_len);
                                return -EFAULT;
                        }
                }
        }

        return 0;
}
static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
                                   struct skd_sg_io *sksgio)
{
        struct skd_special_context *skspcl = NULL;
        int rc;

        for (;; ) {
                ulong flags;

                spin_lock_irqsave(&skdev->lock, flags);
                skspcl = skdev->skspcl_free_list;
                if (skspcl != NULL) {
                        skdev->skspcl_free_list =
                                (struct skd_special_context *)skspcl->req.next;
                        skspcl->req.id += SKD_ID_INCR;
                        skspcl->req.state = SKD_REQ_STATE_SETUP;
                        skspcl->orphaned = 0;
                        skspcl->req.n_sg = 0;
                }
                spin_unlock_irqrestore(&skdev->lock, flags);

                if (skspcl != NULL) {
                        rc = 0;
                        break;
                }

                pr_debug("%s:%s:%d blocking\n",
                         skdev->name, __func__, __LINE__);

                rc = wait_event_interruptible_timeout(
                                skdev->waitq,
                                (skdev->skspcl_free_list != NULL),
                                msecs_to_jiffies(sksgio->sg.timeout));

                pr_debug("%s:%s:%d unblocking, rc=%d\n",
                         skdev->name, __func__, __LINE__, rc);

                if (rc <= 0) {
                        if (rc == 0)
                                rc = -ETIMEDOUT;
                        else
                                rc = -EINTR;
                        break;
                }
                /*
                 * If we get here rc > 0 meaning the timeout to
                 * wait_event_interruptible_timeout() had time left, hence the
                 * sought event -- non-empty free list -- happened.
                 * Retry the allocation.
                 */
        }
        sksgio->skspcl = skspcl;

        return rc;
}
static int skd_skreq_prep_buffering(struct skd_device *skdev,
                                    struct skd_request_context *skreq,
                                    u32 dxfer_len)
{
        u32 resid = dxfer_len;

        /*
         * The DMA engine must have aligned addresses and byte counts.
         */
        resid += (-resid) & 3;
        skreq->sg_byte_count = resid;

        skreq->n_sg = 0;

        while (resid > 0) {
                u32 nbytes = PAGE_SIZE;
                u32 ix = skreq->n_sg;
                struct scatterlist *sg = &skreq->sg[ix];
                struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
                struct page *page;

                if (nbytes > resid)
                        nbytes = resid;

                page = alloc_page(GFP_KERNEL);
                if (page == NULL)
                        return -ENOMEM;

                sg_set_page(sg, page, nbytes, 0);

                /* TODO: This should be going through a pci_???()
                 * routine to do proper mapping. */
                sksg->control = FIT_SGD_CONTROL_NOT_LAST;
                sksg->byte_count = nbytes;

                sksg->host_side_addr = sg_phys(sg);

                sksg->dev_side_addr = 0;
                sksg->next_desc_ptr = skreq->sksg_dma_address +
                                      (ix + 1) * sizeof(*sksg);

                skreq->n_sg++;
                resid -= nbytes;
        }

        if (skreq->n_sg > 0) {
                u32 ix = skreq->n_sg - 1;
                struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];

                sksg->control = FIT_SGD_CONTROL_LAST;
                sksg->next_desc_ptr = 0;
        }

        if (unlikely(skdev->dbg_level > 1)) {
                u32 i;

                pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
                         skdev->name, __func__, __LINE__,
                         skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
                for (i = 0; i < skreq->n_sg; i++) {
                        struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

                        pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
                                 "addr=0x%llx next=0x%llx\n",
                                 skdev->name, __func__, __LINE__,
                                 i, sgd->byte_count, sgd->control,
                                 sgd->host_side_addr, sgd->next_desc_ptr);
                }
        }

        return 0;
}
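/*
 * The alignment fix-up at the top of skd_skreq_prep_buffering(),
 * "resid += (-resid) & 3;", rounds the transfer length up to the next
 * multiple of 4 for the DMA engine.  For example, resid = 10 gives
 * (-10) & 3 = 2, so the padded length becomes 12; lengths that are
 * already aligned add 0.
 */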
static int skd_sg_io_prep_buffering(struct skd_device *skdev,
                                    struct skd_sg_io *sksgio)
{
        struct skd_special_context *skspcl = sksgio->skspcl;
        struct skd_request_context *skreq = &skspcl->req;
        u32 dxfer_len = sksgio->dxfer_len;
        int rc;

        rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
        /*
         * Eventually, errors or not, skd_release_special() is called
         * to recover allocations including partial allocations.
         */
        return rc;
}
static int skd_sg_io_copy_buffer(struct skd_device *skdev,
                                 struct skd_sg_io *sksgio, int dxfer_dir)
{
        struct skd_special_context *skspcl = sksgio->skspcl;
        u32 iov_ix = 0;
        struct sg_iovec curiov;
        u32 sksg_ix = 0;
        u8 *bufp = NULL;
        u32 buf_len = 0;
        u32 resid = sksgio->dxfer_len;
        int rc;

        curiov.iov_len = 0;
        curiov.iov_base = NULL;

        if (dxfer_dir != sksgio->sg.dxfer_direction) {
                if (dxfer_dir != SG_DXFER_TO_DEV ||
                    sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
                        return 0;
        }

        while (resid > 0) {
                u32 nbytes = PAGE_SIZE;

                if (curiov.iov_len == 0) {
                        curiov = sksgio->iov[iov_ix++];
                        continue;
                }

                if (buf_len == 0) {
                        struct page *page;
                        page = sg_page(&skspcl->req.sg[sksg_ix++]);
                        bufp = page_address(page);
                        buf_len = PAGE_SIZE;
                }

                nbytes = min_t(u32, nbytes, resid);
                nbytes = min_t(u32, nbytes, curiov.iov_len);
                nbytes = min_t(u32, nbytes, buf_len);

                if (dxfer_dir == SG_DXFER_TO_DEV)
                        rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
                else
                        rc = __copy_to_user(curiov.iov_base, bufp, nbytes);

                if (rc)
                        return -EFAULT;

                resid -= nbytes;
                curiov.iov_len -= nbytes;
                curiov.iov_base += nbytes;
                bufp += nbytes;
                buf_len -= nbytes;
        }

        return 0;
}
static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
                                 struct skd_sg_io *sksgio)
{
        struct skd_special_context *skspcl = sksgio->skspcl;
        struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
        struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];

        memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);

        /* Initialize the FIT msg header */
        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
        fmh->num_protocol_cmds_coalesced = 1;

        /* Initialize the SCSI request */
        if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
                scsi_req->hdr.sg_list_dma_address =
                        cpu_to_be64(skspcl->req.sksg_dma_address);
        scsi_req->hdr.tag = skspcl->req.id;
        scsi_req->hdr.sg_list_len_bytes =
                cpu_to_be32(skspcl->req.sg_byte_count);
        memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));

        skspcl->req.state = SKD_REQ_STATE_BUSY;
        skd_send_special_fitmsg(skdev, skspcl);

        return 0;
}
static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
{
        unsigned long flags;
        int rc;

        rc = wait_event_interruptible_timeout(skdev->waitq,
                                              (sksgio->skspcl->req.state !=
                                               SKD_REQ_STATE_BUSY),
                                              msecs_to_jiffies(sksgio->sg.
                                                               timeout));

        spin_lock_irqsave(&skdev->lock, flags);

        if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
                pr_debug("%s:%s:%d skspcl %p aborted\n",
                         skdev->name, __func__, __LINE__, sksgio->skspcl);

                /* Build check cond, sense and let command finish. */
                /* For a timeout, we must fabricate completion and sense
                 * data to complete the command */
                sksgio->skspcl->req.completion.status =
                        SAM_STAT_CHECK_CONDITION;

                memset(&sksgio->skspcl->req.err_info, 0,
                       sizeof(sksgio->skspcl->req.err_info));
                sksgio->skspcl->req.err_info.type = 0x70;
                sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
                sksgio->skspcl->req.err_info.code = 0x44;
                sksgio->skspcl->req.err_info.qual = 0;
                rc = 0;
        } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
                /* No longer on the adapter. We finish. */
                rc = 0;
        else {
                /* Something's gone wrong. Still busy. Timeout or
                 * user interrupted (control-C). Mark as an orphan
                 * so it will be disposed when completed. */
                sksgio->skspcl->orphaned = 1;
                sksgio->skspcl = NULL;
                if (rc == 0) {
                        pr_debug("%s:%s:%d timed out %p (%u ms)\n",
                                 skdev->name, __func__, __LINE__,
                                 sksgio, sksgio->sg.timeout);
                        rc = -ETIMEDOUT;
                } else {
                        pr_debug("%s:%s:%d cntlc %p\n",
                                 skdev->name, __func__, __LINE__, sksgio);
                        rc = -EINTR;
                }
        }

        spin_unlock_irqrestore(&skdev->lock, flags);

        return rc;
}
static int skd_sg_io_put_status(struct skd_device *skdev,
                                struct skd_sg_io *sksgio)
{
        struct sg_io_hdr *sgp = &sksgio->sg;
        struct skd_special_context *skspcl = sksgio->skspcl;
        int resid = 0;

        u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);

        sgp->status = skspcl->req.completion.status;
        resid = sksgio->dxfer_len - nb;

        sgp->masked_status = sgp->status & STATUS_MASK;
        sgp->msg_status = 0;
        sgp->host_status = 0;
        sgp->driver_status = 0;
        sgp->resid = resid;
        if (sgp->masked_status || sgp->host_status || sgp->driver_status)
                sgp->info |= SG_INFO_CHECK;

        pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
                 skdev->name, __func__, __LINE__,
                 sgp->status, sgp->masked_status, sgp->resid);

        if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
                if (sgp->mx_sb_len > 0) {
                        struct fit_comp_error_info *ei = &skspcl->req.err_info;
                        u32 nbytes = sizeof(*ei);

                        nbytes = min_t(u32, nbytes, sgp->mx_sb_len);

                        sgp->sb_len_wr = nbytes;

                        if (__copy_to_user(sgp->sbp, ei, nbytes)) {
                                pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
                                         skdev->name, __func__, __LINE__,
                                         sgp->sbp);
                                return -EFAULT;
                        }
                }
        }

        if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
                pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
                         skdev->name, __func__, __LINE__, sksgio->argp);
                return -EFAULT;
        }

        return 0;
}
static int skd_sg_io_release_skspcl(struct skd_device *skdev,
                                    struct skd_sg_io *sksgio)
{
        struct skd_special_context *skspcl = sksgio->skspcl;

        if (skspcl != NULL) {
                ulong flags;

                sksgio->skspcl = NULL;

                spin_lock_irqsave(&skdev->lock, flags);
                skd_release_special(skdev, skspcl);
                spin_unlock_irqrestore(&skdev->lock, flags);
        }

        return 0;
}
/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */
static int skd_format_internal_skspcl(struct skd_device *skdev)
{
        struct skd_special_context *skspcl = &skdev->internal_skspcl;
        struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
        struct fit_msg_hdr *fmh;
        uint64_t dma_address;
        struct skd_scsi_request *scsi;

        fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
        fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
        fmh->num_protocol_cmds_coalesced = 1;

        scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
        memset(scsi, 0, sizeof(*scsi));
        dma_address = skspcl->req.sksg_dma_address;
        scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
        sgd->control = FIT_SGD_CONTROL_LAST;
        sgd->byte_count = 0;
        sgd->host_side_addr = skspcl->db_dma_address;
        sgd->dev_side_addr = 0;
        sgd->next_desc_ptr = 0LL;

        return 1;
}
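/*
 * Layout of the internal special context's message buffer: the FIT
 * header lives at msg_buf[0] and the single SSDI/SCSI request at
 * msg_buf[64], matching the 64-byte FIT header plus 64-byte command
 * framing assumed by skd_send_special_fitmsg() below.
 */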
#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
                                     struct skd_special_context *skspcl,
                                     u8 opcode)
{
        struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
        struct skd_scsi_request *scsi;
        unsigned char *buf = skspcl->data_buf;
        int i;

        if (skspcl->req.state != SKD_REQ_STATE_IDLE)
                /*
                 * A refresh is already in progress.
                 * Just wait for it to finish.
                 */
                return;

        SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
        skspcl->req.state = SKD_REQ_STATE_BUSY;
        skspcl->req.id += SKD_ID_INCR;

        scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
        scsi->hdr.tag = skspcl->req.id;

        memset(scsi->cdb, 0, sizeof(scsi->cdb));

        switch (opcode) {
        case TEST_UNIT_READY:
                scsi->cdb[0] = TEST_UNIT_READY;
                sgd->byte_count = 0;
                scsi->hdr.sg_list_len_bytes = 0;
                break;

        case READ_CAPACITY:
                scsi->cdb[0] = READ_CAPACITY;
                sgd->byte_count = SKD_N_READ_CAP_BYTES;
                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
                break;

        case INQUIRY:
                scsi->cdb[0] = INQUIRY;
                scsi->cdb[1] = 0x01;    /* evpd */
                scsi->cdb[2] = 0x80;    /* serial number page */
                scsi->cdb[4] = 0x10;
                sgd->byte_count = 16;
                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
                break;

        case SYNCHRONIZE_CACHE:
                scsi->cdb[0] = SYNCHRONIZE_CACHE;
                sgd->byte_count = 0;
                scsi->hdr.sg_list_len_bytes = 0;
                break;

        case WRITE_BUFFER:
                scsi->cdb[0] = WRITE_BUFFER;
                scsi->cdb[1] = 0x02;
                scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
                scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
                sgd->byte_count = WR_BUF_SIZE;
                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
                /* fill incrementing byte pattern */
                for (i = 0; i < sgd->byte_count; i++)
                        buf[i] = i & 0xFF;
                break;

        case READ_BUFFER:
                scsi->cdb[0] = READ_BUFFER;
                scsi->cdb[1] = 0x02;
                scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
                scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
                sgd->byte_count = WR_BUF_SIZE;
                scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
                memset(skspcl->data_buf, 0, sgd->byte_count);
                break;

        default:
                SKD_ASSERT("Don't know what to send");
                return;
        }
        skd_send_special_fitmsg(skdev, skspcl);
}
static void skd_refresh_device_data(struct skd_device *skdev)
{
        struct skd_special_context *skspcl = &skdev->internal_skspcl;

        skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}
static int skd_chk_read_buf(struct skd_device *skdev,
                            struct skd_special_context *skspcl)
{
        unsigned char *buf = skspcl->data_buf;
        int i;

        /* check for incrementing byte pattern */
        for (i = 0; i < WR_BUF_SIZE; i++)
                if (buf[i] != (i & 0xFF))
                        return 1;

        return 0;
}
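/*
 * WRITE_BUFFER fills the 512-byte internal buffer with the pattern
 * 00 01 02 ... FF 00 01 ... and READ_BUFFER reads it back; a mismatch
 * here means the data path to the drive is not moving bytes intact,
 * which skd_complete_internal() treats as a connect failure worth a
 * soft reset.
 */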
static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
                                 u8 code, u8 qual, u8 fruc)
{
        /* If the check condition is of special interest, log a message */
        if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02) &&
            (code == 0x04) && (qual == 0x06)) {
                pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
                       "ascq/fruc %02x/%02x/%02x/%02x\n",
                       skd_name(skdev), key, code, qual, fruc);
        }
}
static void skd_complete_internal(struct skd_device *skdev,
                                  volatile struct fit_completion_entry_v1
                                  *skcomp,
                                  volatile struct fit_comp_error_info *skerr,
                                  struct skd_special_context *skspcl)
{
        u8 *buf = skspcl->data_buf;
        u8 status;
        int i;
        struct skd_scsi_request *scsi =
                (struct skd_scsi_request *)&skspcl->msg_buf[64];

        SKD_ASSERT(skspcl == &skdev->internal_skspcl);

        pr_debug("%s:%s:%d complete internal %x\n",
                 skdev->name, __func__, __LINE__, scsi->cdb[0]);

        skspcl->req.completion = *skcomp;
        skspcl->req.state = SKD_REQ_STATE_IDLE;
        skspcl->req.id += SKD_ID_INCR;

        status = skspcl->req.completion.status;

        skd_log_check_status(skdev, status, skerr->key, skerr->code,
                             skerr->qual, skerr->fruc);

        switch (scsi->cdb[0]) {
        case TEST_UNIT_READY:
                if (status == SAM_STAT_GOOD)
                        skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
                else if ((status == SAM_STAT_CHECK_CONDITION) &&
                         (skerr->key == MEDIUM_ERROR))
                        skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
                else {
                        if (skdev->state == SKD_DRVR_STATE_STOPPING) {
                                pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
                                         skdev->name, __func__, __LINE__,
                                         skdev->state);
                                return;
                        }
                        pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
                                 skdev->name, __func__, __LINE__);
                        skd_send_internal_skspcl(skdev, skspcl, 0x00);
                }
                break;

        case WRITE_BUFFER:
                if (status == SAM_STAT_GOOD)
                        skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
                else {
                        if (skdev->state == SKD_DRVR_STATE_STOPPING) {
                                pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
                                         skdev->name, __func__, __LINE__,
                                         skdev->state);
                                return;
                        }
                        pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
                                 skdev->name, __func__, __LINE__);
                        skd_send_internal_skspcl(skdev, skspcl, 0x00);
                }
                break;

        case READ_BUFFER:
                if (status == SAM_STAT_GOOD) {
                        if (skd_chk_read_buf(skdev, skspcl) == 0)
                                skd_send_internal_skspcl(skdev, skspcl,
                                                         READ_CAPACITY);
                        else {
                                pr_err("(%s):*** W/R Buffer mismatch %d ***\n",
                                       skd_name(skdev),
                                       skdev->connect_retries);
                                if (skdev->connect_retries <
                                    SKD_MAX_CONNECT_RETRIES) {
                                        skdev->connect_retries++;
                                        skd_soft_reset(skdev);
                                } else {
                                        pr_err("(%s): W/R Buffer Connect Error\n",
                                               skd_name(skdev));
                                        return;
                                }
                        }

                } else {
                        if (skdev->state == SKD_DRVR_STATE_STOPPING) {
                                pr_debug("%s:%s:%d "
                                         "read buffer failed, don't send anymore state 0x%x\n",
                                         skdev->name, __func__, __LINE__,
                                         skdev->state);
                                return;
                        }
                        pr_debug("%s:%s:%d "
                                 "**** read buffer failed, retry skerr\n",
                                 skdev->name, __func__, __LINE__);
                        skd_send_internal_skspcl(skdev, skspcl, 0x00);
                }
                break;

        case READ_CAPACITY:
                skdev->read_cap_is_valid = 0;
                if (status == SAM_STAT_GOOD) {
                        skdev->read_cap_last_lba =
                                (buf[0] << 24) | (buf[1] << 16) |
                                (buf[2] << 8) | buf[3];
                        skdev->read_cap_blocksize =
                                (buf[4] << 24) | (buf[5] << 16) |
                                (buf[6] << 8) | buf[7];

                        pr_debug("%s:%s:%d last lba %d, bs %d\n",
                                 skdev->name, __func__, __LINE__,
                                 skdev->read_cap_last_lba,
                                 skdev->read_cap_blocksize);

                        set_capacity(skdev->disk,
                                     skdev->read_cap_last_lba + 1);

                        skdev->read_cap_is_valid = 1;

                        skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
                } else if ((status == SAM_STAT_CHECK_CONDITION) &&
                           (skerr->key == MEDIUM_ERROR)) {
                        skdev->read_cap_last_lba = ~0;
                        set_capacity(skdev->disk,
                                     skdev->read_cap_last_lba + 1);
                        pr_debug("%s:%s:%d "
                                 "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
                                 skdev->name, __func__, __LINE__);
                        skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
                } else {
                        pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
                                 skdev->name, __func__, __LINE__);
                        skd_send_internal_skspcl(skdev, skspcl,
                                                 TEST_UNIT_READY);
                }
                break;

        case INQUIRY:
                skdev->inquiry_is_valid = 0;
                if (status == SAM_STAT_GOOD) {
                        skdev->inquiry_is_valid = 1;

                        for (i = 0; i < 12; i++)
                                skdev->inq_serial_num[i] = buf[i + 4];
                        skdev->inq_serial_num[12] = 0;
                }

                if (skd_unquiesce_dev(skdev) < 0)
                        pr_debug("%s:%s:%d **** failed to ONLINE device\n",
                                 skdev->name, __func__, __LINE__);
                /* connection is complete */
                skdev->connect_retries = 0;
                break;

        case SYNCHRONIZE_CACHE:
                if (status == SAM_STAT_GOOD)
                        skdev->sync_done = 1;
                else
                        skdev->sync_done = -1;
                wake_up_interruptible(&skdev->waitq);
                break;

        default:
                SKD_ASSERT("we didn't send this");
        }
}
/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */
static void skd_send_fitmsg(struct skd_device *skdev,
                            struct skd_fitmsg_context *skmsg)
{
        u64 qcmd;
        struct fit_msg_hdr *fmh;

        pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
                 skdev->name, __func__, __LINE__,
                 skmsg->mb_dma_address, skdev->in_flight);
        pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
                 skdev->name, __func__, __LINE__,
                 skmsg->msg_buf, skmsg->offset);

        qcmd = skmsg->mb_dma_address;
        qcmd |= FIT_QCMD_QID_NORMAL;

        fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
        skmsg->outstanding = fmh->num_protocol_cmds_coalesced;

        if (unlikely(skdev->dbg_level > 1)) {
                u8 *bp = (u8 *)skmsg->msg_buf;
                int i;
                for (i = 0; i < skmsg->length; i += 8) {
                        pr_debug("%s:%s:%d msg[%2d] %8ph\n",
                                 skdev->name, __func__, __LINE__, i, &bp[i]);
                }
        }

        if (skmsg->length > 256)
                qcmd |= FIT_QCMD_MSGSIZE_512;
        else if (skmsg->length > 128)
                qcmd |= FIT_QCMD_MSGSIZE_256;
        else if (skmsg->length > 64)
                qcmd |= FIT_QCMD_MSGSIZE_128;
        else
                /*
                 * This makes no sense because the FIT msg header is
                 * 64 bytes. If the msg is only 64 bytes long it has
                 * no payload.
                 */
                qcmd |= FIT_QCMD_MSGSIZE_64;

        SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
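/*
 * FIT_Q_COMMAND takes a single 64-bit doorbell value: the message's DMA
 * address with the queue id and a size class OR'ed into its low bits,
 * which assumes the buffer's alignment keeps those bits clear.  The
 * size class only encodes 64/128/256/512-byte granularity, which is why
 * skmsg->length is bucketed above rather than passed exactly.
 */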
static void skd_send_special_fitmsg(struct skd_device *skdev,
                                    struct skd_special_context *skspcl)
{
        u64 qcmd;

        if (unlikely(skdev->dbg_level > 1)) {
                u8 *bp = (u8 *)skspcl->msg_buf;
                int i;

                for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
                        pr_debug("%s:%s:%d  spcl[%2d] %8ph\n",
                                 skdev->name, __func__, __LINE__, i, &bp[i]);
                }

                pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
                         skdev->name, __func__, __LINE__,
                         skspcl, skspcl->req.id, skspcl->req.sksg_list,
                         skspcl->req.sksg_dma_address);
                for (i = 0; i < skspcl->req.n_sg; i++) {
                        struct fit_sg_descriptor *sgd =
                                &skspcl->req.sksg_list[i];

                        pr_debug("%s:%s:%d   sg[%d] count=%u ctrl=0x%x "
                                 "addr=0x%llx next=0x%llx\n",
                                 skdev->name, __func__, __LINE__,
                                 i, sgd->byte_count, sgd->control,
                                 sgd->host_side_addr, sgd->next_desc_ptr);
                }
        }

        /*
         * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
         * and one 64-byte SSDI command.
         */
        qcmd = skspcl->mb_dma_address;
        qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

        SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */
static void skd_complete_other(struct skd_device *skdev,
                               volatile struct fit_completion_entry_v1
                               *skcomp,
                               volatile struct fit_comp_error_info *skerr);
struct sns_info {
	u8 type;
	u8 stat;
	u8 key;
	u8 asc;
	u8 ascq;
	u8 mask;
	enum skd_check_status_action action;
};

static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{ 0x70, 0x02, RECOVERED_ERROR, 0,    0,	   0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	/* Smart alerts */
	{ 0x70, 0x02, NO_SENSE,	       0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE,	       0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F,	/* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B,	       0,    0,	   0x1C,	/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06,	       0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06,	       0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06,	       0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06,	       0x3f, 0x01, 0x1F,	/* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },
};
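/*
 * Worked example (added): for the first entry above, mask 0x1C asks
 * skd_check_status() to compare type (0x10), stat (0x08) and key (0x04)
 * but ignore asc/ascq, so any RECOVERED_ERROR sense with type 0x70 and
 * status 0x02 is reported as good regardless of the additional sense bytes.
 */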
/*
 * Look up status and sense data to decide how to handle the error
 * from the device.
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
{
	int i, n;

	pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
	       skd_name(skdev), skerr->key, skerr->code, skerr->qual,
	       skerr->fruc);

	pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		 skdev->name, __func__, __LINE__, skerr->type, cmp_status,
		 skerr->key, skerr->code, skerr->qual, skerr->fruc);

	/* Does the info match an entry in the good category? */
	n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
	for (i = 0; i < n; i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			pr_err("(%s): SMART Alert: sense key/asc/ascq "
			       "%02x/%02x/%02x\n",
			       skd_name(skdev), skerr->key,
			       skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		pr_debug("%s:%s:%d status check: error\n",
			 skdev->name, __func__, __LINE__);
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	pr_debug("%s:%s:%d status check good default\n",
		 skdev->name, __func__, __LINE__);
	return SKD_CHECK_STATUS_REPORT_GOOD;
}
static void skd_resolve_req_exception(struct skd_device *skdev,
				      struct skd_request_context *skreq)
{
	u8 cmp_status = skreq->completion.status;

	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
	case SKD_CHECK_STATUS_REPORT_GOOD:
	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
		skd_end_request(skdev, skreq, 0);
		break;

	case SKD_CHECK_STATUS_BUSY_IMMINENT:
		skd_log_skreq(skdev, skreq, "retry(busy)");
		blk_requeue_request(skdev->queue, skreq->req);
		pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
		skdev->timer_countdown = SKD_TIMER_MINUTES(20);
		skd_quiesce_dev(skdev);
		break;

	case SKD_CHECK_STATUS_REQUEUE_REQUEST:
		if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
			skd_log_skreq(skdev, skreq, "retry");
			blk_requeue_request(skdev->queue, skreq->req);
			break;
		}
		/* fall through to report error */

	case SKD_CHECK_STATUS_REPORT_ERROR:
	default:
		skd_end_request(skdev, skreq, -EIO);
		break;
	}
}
/* assume spinlock is already held */
static void skd_release_skreq(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	u32 msg_slot;
	struct skd_fitmsg_context *skmsg;
	u32 timo_slot;

	/*
	 * Reclaim the FIT msg buffer if this is
	 * the first of the requests it carried to
	 * be completed. The FIT msg buffer used to
	 * send this request cannot be reused until
	 * we are sure the s1120 card has copied
	 * it to its memory. The FIT msg might have
	 * contained several requests. As soon as
	 * any of them are completed we know that
	 * the entire FIT msg was transferred.
	 * Only the first completed request will
	 * match the FIT msg buffer id. The FIT
	 * msg buffer id is immediately updated.
	 * When subsequent requests complete the FIT
	 * msg buffer id won't match, so we know
	 * quite cheaply that it is already done.
	 */
	msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
	SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);

	skmsg = &skdev->skmsg_table[msg_slot];
	if (skmsg->id == skreq->fitmsg_id) {
		SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
		SKD_ASSERT(skmsg->outstanding > 0);
		skmsg->outstanding--;
		if (skmsg->outstanding == 0) {
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
			skmsg->next = skdev->skmsg_free_list;
			skdev->skmsg_free_list = skmsg;
		}
	}

	/*
	 * Decrease the number of active requests.
	 * Also decrements the count in the timeout slot.
	 */
	SKD_ASSERT(skdev->in_flight > 0);
	skdev->in_flight -= 1;

	timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
	SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
	skdev->timeout_slot[timo_slot] -= 1;

	/*
	 * Reset backpointer
	 */
	skreq->req = NULL;

	/*
	 * Reclaim the skd_request_context
	 */
	skreq->state = SKD_REQ_STATE_IDLE;
	skreq->id += SKD_ID_INCR;
	skreq->next = skdev->skreq_free_list;
	skdev->skreq_free_list = skreq;
}
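/*
 * Added note: SKD_ID_INCR sits just above SKD_ID_SLOT_AND_TABLE_MASK, so
 * bumping the id on every release changes only the uniquifier bits while
 * preserving table and slot. A stale completion arriving later therefore
 * no longer matches skreq->id and is dropped by the mismatch check in
 * skd_isr_completion_posted().
 */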
#define DRIVER_INQ_EVPD_PAGE_CODE   0xDA

static void skd_do_inq_page_00(struct skd_device *skdev,
			       volatile struct fit_completion_entry_v1 *skcomp,
			       volatile struct fit_comp_error_info *skerr,
			       uint8_t *cdb, uint8_t *buf)
{
	uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;

	/* Caller requested "supported pages". The driver needs to insert
	 * its page.
	 */
	pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
		 skdev->name, __func__, __LINE__);

	/* If the device rejected the request because the CDB was
	 * improperly formed, then just leave.
	 */
	if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
	    skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
		return;

	/* Get the amount of space the caller allocated */
	max_bytes = (cdb[3] << 8) | cdb[4];

	/* Get the number of pages actually returned by the device */
	drive_pages = (buf[2] << 8) | buf[3];
	drive_bytes = drive_pages + 4;
	new_size = drive_pages + 1;

	/* Supported pages must be in numerical order, so find where
	 * the driver page needs to be inserted into the list of
	 * pages returned by the device.
	 */
	for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
		if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
			return; /* Device using this page code. abort */
		else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
			break;
	}

	if (insert_pt < max_bytes) {
		uint16_t u;

		/* Shift everything up one byte to make room. */
		for (u = new_size + 3; u > insert_pt; u--)
			buf[u] = buf[u - 1];
		buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;

		/* SCSI byte order increment of num_returned_bytes by 1 */
		skcomp->num_returned_bytes =
			be32_to_cpu(skcomp->num_returned_bytes) + 1;
		skcomp->num_returned_bytes =
			be32_to_cpu(skcomp->num_returned_bytes);
	}

	/* update page length field to reflect the driver's page too */
	buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
	buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
}
static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
{
	int pcie_reg;
	u16 pci_bus_speed;
	u8 pci_lanes;

	pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pcie_reg) {
		u16 linksta;

		pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);

		pci_bus_speed = linksta & 0xF;
		pci_lanes = (linksta & 0x3F0) >> 4;
	} else {
		*speed = STEC_LINK_UNKNOWN;
		*width = 0xFF;
		return;
	}

	switch (pci_bus_speed) {
	case 1:
		*speed = STEC_LINK_2_5GTS;
		break;
	case 2:
		*speed = STEC_LINK_5GTS;
		break;
	case 3:
		*speed = STEC_LINK_8GTS;
		break;
	default:
		*speed = STEC_LINK_UNKNOWN;
		break;
	}

	if (pci_lanes <= 0x20)
		*width = pci_lanes;
	else
		*width = 0xFF;
}
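/*
 * Added note: the 0xF and 0x3F0 masks above correspond to the Current
 * Link Speed and Negotiated Link Width fields of the PCIe Link Status
 * register (PCI_EXP_LNKSTA_CLS and PCI_EXP_LNKSTA_NLW in current
 * kernel headers).
 */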
static void skd_do_inq_page_da(struct skd_device *skdev,
			       volatile struct fit_completion_entry_v1 *skcomp,
			       volatile struct fit_comp_error_info *skerr,
			       uint8_t *cdb, uint8_t *buf)
{
	struct pci_dev *pdev = skdev->pdev;
	unsigned max_bytes;
	struct driver_inquiry_data inq;
	u16 val;

	pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
		 skdev->name, __func__, __LINE__);

	memset(&inq, 0, sizeof(inq));

	inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;

	skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
	inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
	inq.pcie_device_number = PCI_SLOT(pdev->devfn);
	inq.pcie_function_number = PCI_FUNC(pdev->devfn);

	pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
	inq.pcie_vendor_id = cpu_to_be16(val);

	pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
	inq.pcie_device_id = cpu_to_be16(val);

	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
	inq.pcie_subsystem_vendor_id = cpu_to_be16(val);

	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
	inq.pcie_subsystem_device_id = cpu_to_be16(val);

	/* Driver version, fixed length, padded with spaces on the right */
	inq.driver_version_length = sizeof(inq.driver_version);
	memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
	memcpy(inq.driver_version, DRV_VER_COMPL,
	       min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));

	inq.page_length = cpu_to_be16((sizeof(inq) - 4));

	/* Clear the error set by the device */
	skcomp->status = SAM_STAT_GOOD;
	memset((void *)skerr, 0, sizeof(*skerr));

	/* copy response into output buffer */
	max_bytes = (cdb[3] << 8) | cdb[4];
	memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));

	skcomp->num_returned_bytes =
		be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
}
static void skd_do_driver_inq(struct skd_device *skdev,
			      volatile struct fit_completion_entry_v1 *skcomp,
			      volatile struct fit_comp_error_info *skerr,
			      uint8_t *cdb, uint8_t *buf)
{
	if (buf == NULL)
		return;
	else if (cdb[0] != INQUIRY)
		return;		/* Not an INQUIRY */
	else if ((cdb[1] & 1) == 0)
		return;		/* EVPD not set */
	else if (cdb[2] == 0)
		/* Need to add driver's page to supported pages list */
		skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
	else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
		/* Caller requested driver's page */
		skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
}
static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
{
	if (!sg)
		return NULL;
	if (!sg_page(sg))
		return NULL;
	return sg_virt(sg);
}
static void skd_process_scsi_inq(struct skd_device *skdev,
				 volatile struct fit_completion_entry_v1 *skcomp,
				 volatile struct fit_comp_error_info *skerr,
				 struct skd_special_context *skspcl)
{
	uint8_t *buf;
	struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
	struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];

	dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
			    skspcl->req.sg_data_dir);
	buf = skd_sg_1st_page_ptr(skspcl->req.sg);

	if (buf)
		skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
}
static int skd_isr_completion_posted(struct skd_device *skdev,
				     int limit, int *enqueued)
{
	volatile struct fit_completion_entry_v1 *skcmp = NULL;
	volatile struct fit_comp_error_info *skerr;
	u16 req_id;
	u32 req_slot;
	struct skd_request_context *skreq;
	u16 cmp_cntxt = 0;
	u8 cmp_status = 0;
	u8 cmp_cycle = 0;
	u32 cmp_bytes = 0;
	int rc = 0;
	int processed = 0;

	for (;; ) {
		SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);

		skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
		cmp_cycle = skcmp->cycle;
		cmp_cntxt = skcmp->tag;
		cmp_status = skcmp->status;
		cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);

		skerr = &skdev->skerr_table[skdev->skcomp_ix];

		pr_debug("%s:%s:%d "
			 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
			 "busy=%d rbytes=0x%x proto=%d\n",
			 skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
			 skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
			 skdev->in_flight, cmp_bytes, skdev->proto_ver);

		if (cmp_cycle != skdev->skcomp_cycle) {
			pr_debug("%s:%s:%d end of completions\n",
				 skdev->name, __func__, __LINE__);
			break;
		}
		/*
		 * Update the completion queue head index and possibly
		 * the completion cycle count. 8-bit wrap-around.
		 */
		skdev->skcomp_ix++;
		if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
			skdev->skcomp_ix = 0;
			skdev->skcomp_cycle++;
		}

		/*
		 * The command context is a unique 32-bit ID. The low order
		 * bits help locate the request. The request is usually a
		 * r/w request (see skd_start() above) or a special request.
		 */
		req_id = cmp_cntxt;
		req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;

		/* Is this other than a r/w request? */
		if (req_slot >= skdev->num_req_context) {
			/*
			 * This is not a completion for a r/w request.
			 */
			skd_complete_other(skdev, skcmp, skerr);
			continue;
		}

		skreq = &skdev->skreq_table[req_slot];

		/*
		 * Make sure the request ID for the slot matches.
		 */
		if (skreq->id != req_id) {
			pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
				 skdev->name, __func__, __LINE__,
				 req_id, skreq->id);
			{
				u16 new_id = cmp_cntxt;

				pr_err("(%s): Completion mismatch "
				       "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
				       skd_name(skdev), req_id,
				       skreq->id, new_id);

				continue;
			}
		}

		SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);

		if (skreq->state == SKD_REQ_STATE_ABORTED) {
			pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
				 skdev->name, __func__, __LINE__,
				 skreq, skreq->id);
			/* a previously timed out command can
			 * now be cleaned up */
			skd_release_skreq(skdev, skreq);
			continue;
		}

		skreq->completion = *skcmp;
		if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
			skreq->err_info = *skerr;
			skd_log_check_status(skdev, cmp_status, skerr->key,
					     skerr->code, skerr->qual,
					     skerr->fruc);
		}
		/* Release DMA resources for the request. */
		if (skreq->n_sg > 0)
			skd_postop_sg_list(skdev, skreq);

		if (!skreq->req) {
			pr_debug("%s:%s:%d NULL backptr skdreq %p, "
				 "req=0x%x req_id=0x%x\n",
				 skdev->name, __func__, __LINE__,
				 skreq, skreq->id, req_id);
		} else {
			/*
			 * Capture the outcome and post it back to the
			 * native request.
			 */
			if (likely(cmp_status == SAM_STAT_GOOD))
				skd_end_request(skdev, skreq, 0);
			else
				skd_resolve_req_exception(skdev, skreq);
		}

		/*
		 * Release the skreq, its FIT msg (if one), timeout slot,
		 * and queue depth.
		 */
		skd_release_skreq(skdev, skreq);

		/* skd_isr_comp_limit equal zero means no limit */
		if (limit) {
			if (++processed >= limit) {
				rc = 1;
				break;
			}
		}
	}

	if ((skdev->state == SKD_DRVR_STATE_PAUSING)
	    && (skdev->in_flight) == 0) {
		skdev->state = SKD_DRVR_STATE_PAUSED;
		wake_up_interruptible(&skdev->waitq);
	}

	return rc;
}
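/*
 * Added note: the cycle check above appears to be the usual lock-free
 * producer/consumer handshake. The device publishes each entry's cycle
 * field last, so an entry whose cycle does not yet match skcomp_cycle
 * marks the first slot the device has not finished writing, and the
 * scan stops there without any locking against the hardware.
 */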
static void skd_complete_other(struct skd_device *skdev,
			       volatile struct fit_completion_entry_v1 *skcomp,
			       volatile struct fit_comp_error_info *skerr)
{
	u32 req_id = 0;
	u32 req_table;
	u32 req_slot;
	struct skd_special_context *skspcl;

	req_id = skcomp->tag;
	req_table = req_id & SKD_ID_TABLE_MASK;
	req_slot = req_id & SKD_ID_SLOT_MASK;

	pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
		 skdev->name, __func__, __LINE__,
		 req_table, req_id, req_slot);

	/*
	 * Based on the request id, determine how to dispatch this completion.
	 * This switch/case is finding the good cases and forwarding the
	 * completion entry. Errors are reported below the switch.
	 */
	switch (req_table) {
	case SKD_ID_RW_REQUEST:
		/*
		 * The caller, skd_completion_posted_isr() above,
		 * handles r/w requests. The only way we get here
		 * is if the req_slot is out of bounds.
		 */
		break;

	case SKD_ID_SPECIAL_REQUEST:
		/*
		 * Make sure the req_slot is in bounds and that the id
		 * matches.
		 */
		if (req_slot < skdev->n_special) {
			skspcl = &skdev->skspcl_table[req_slot];
			if (skspcl->req.id == req_id &&
			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
				skd_complete_special(skdev,
						     skcomp, skerr, skspcl);
				return;
			}
		}
		break;

	case SKD_ID_INTERNAL:
		if (req_slot == 0) {
			skspcl = &skdev->internal_skspcl;
			if (skspcl->req.id == req_id &&
			    skspcl->req.state == SKD_REQ_STATE_BUSY) {
				skd_complete_internal(skdev,
						      skcomp, skerr, skspcl);
				return;
			}
		}
		break;

	case SKD_ID_FIT_MSG:
		/*
		 * These id's should never appear in a completion record.
		 */
		break;

	default:
		/*
		 * These id's should never appear anywhere;
		 */
		break;
	}

	/*
	 * If we get here it is a bad or stale id.
	 */
}
static void skd_complete_special(struct skd_device *skdev,
				 volatile struct fit_completion_entry_v1 *skcomp,
				 volatile struct fit_comp_error_info *skerr,
				 struct skd_special_context *skspcl)
{
	pr_debug("%s:%s:%d completing special request %p\n",
		 skdev->name, __func__, __LINE__, skspcl);
	if (skspcl->orphaned) {
		/* Discard orphaned request */
		/* ?: Can this release directly or does it need
		 * to use a worker? */
		pr_debug("%s:%s:%d release orphaned %p\n",
			 skdev->name, __func__, __LINE__, skspcl);
		skd_release_special(skdev, skspcl);
		return;
	}

	skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);

	skspcl->req.state = SKD_REQ_STATE_COMPLETED;
	skspcl->req.completion = *skcomp;
	skspcl->req.err_info = *skerr;

	skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
			     skerr->code, skerr->qual, skerr->fruc);

	wake_up_interruptible(&skdev->waitq);
}
/* assume spinlock is already held */
static void skd_release_special(struct skd_device *skdev,
				struct skd_special_context *skspcl)
{
	int i, was_depleted;

	for (i = 0; i < skspcl->req.n_sg; i++) {
		struct page *page = sg_page(&skspcl->req.sg[i]);

		__free_page(page);
	}

	was_depleted = (skdev->skspcl_free_list == NULL);

	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;
	skspcl->req.next =
		(struct skd_request_context *)skdev->skspcl_free_list;
	skdev->skspcl_free_list = (struct skd_special_context *)skspcl;

	if (was_depleted) {
		pr_debug("%s:%s:%d skspcl was depleted\n",
			 skdev->name, __func__, __LINE__);
		/* Free list was depleted. There might be waiters. */
		wake_up_interruptible(&skdev->waitq);
	}
}
static void skd_reset_skcomp(struct skd_device *skdev)
{
	u32 nbytes;
	struct fit_completion_entry_v1 *skcomp;

	nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
	nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;

	memset(skdev->skcomp_table, 0, nbytes);

	skdev->skcomp_ix = 0;
	skdev->skcomp_cycle = 1;
}
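/*
 * Added note: skcomp_cycle starts at 1 while the table itself is zeroed,
 * so freshly reset entries (cycle == 0) never match the expected cycle
 * and the completion scan correctly sees an empty queue until the device
 * posts its first entry.
 */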
/*
 *****************************************************************************
 * INTERRUPTS
 *****************************************************************************
 */
static void skd_completion_worker(struct work_struct *work)
{
	struct skd_device *skdev =
		container_of(work, struct skd_device, completion_worker);
	unsigned long flags;
	int flush_enqueued = 0;

	spin_lock_irqsave(&skdev->lock, flags);

	/*
	 * pass in limit=0, which means no limit..
	 * process everything in compq
	 */
	skd_isr_completion_posted(skdev, 0, &flush_enqueued);
	skd_request_fn(skdev->queue);

	spin_unlock_irqrestore(&skdev->lock, flags);
}
static void skd_isr_msg_from_dev(struct skd_device *skdev);

static irqreturn_t
skd_isr(int irq, void *ptr)
{
	struct skd_device *skdev;
	u32 intstat;
	u32 ack;
	int rc = 0;
	int deferred = 0;
	int flush_enqueued = 0;

	skdev = (struct skd_device *)ptr;
	spin_lock(&skdev->lock);

	for (;; ) {
		intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);

		ack = FIT_INT_DEF_MASK;
		ack &= intstat;

		pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
			 skdev->name, __func__, __LINE__, intstat, ack);

		/* As long as there is an int pending on device, keep
		 * running loop. When none, get out, but if we've never
		 * done any processing, call completion handler?
		 */
		if (ack == 0) {
			/* No interrupts on device, but run the completion
			 * processor anyway?
			 */
			if (rc == 0)
				if (likely (skdev->state
					== SKD_DRVR_STATE_ONLINE))
					deferred = 1;
			break;
		}

		rc = IRQ_HANDLED;

		SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);

		if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
			   (skdev->state != SKD_DRVR_STATE_STOPPING))) {
			if (intstat & FIT_ISH_COMPLETION_POSTED) {
				/*
				 * If we have already deferred completion
				 * processing, don't bother running it again
				 */
				if (deferred == 0)
					deferred =
						skd_isr_completion_posted(skdev,
						skd_isr_comp_limit, &flush_enqueued);
			}

			if (intstat & FIT_ISH_FW_STATE_CHANGE) {
				skd_isr_fwstate(skdev);
				if (skdev->state == SKD_DRVR_STATE_FAULT ||
				    skdev->state ==
				    SKD_DRVR_STATE_DISAPPEARED) {
					spin_unlock(&skdev->lock);
					return rc;
				}
			}

			if (intstat & FIT_ISH_MSG_FROM_DEV)
				skd_isr_msg_from_dev(skdev);
		}
	}

	if (unlikely(flush_enqueued))
		skd_request_fn(skdev->queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		skd_request_fn(skdev->queue);

	spin_unlock(&skdev->lock);

	return rc;
}
static void skd_drive_fault(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_FAULT;
	pr_err("(%s): Drive FAULT\n", skd_name(skdev));
}

static void skd_drive_disappeared(struct skd_device *skdev)
{
	skdev->state = SKD_DRVR_STATE_DISAPPEARED;
	pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
}
static void skd_isr_fwstate(struct skd_device *skdev)
{
	u32 sense;
	u32 state;
	u32 mtd;
	int prev_driver_state = skdev->state;

	sense = SKD_READL(skdev, FIT_STATUS);
	state = sense & FIT_SR_DRIVE_STATE_MASK;

	pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
	       skd_name(skdev),
	       skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
	       skd_drive_state_to_str(state), state);

	skdev->drive_state = state;

	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_INIT:
		if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
			skd_disable_interrupts(skdev);
			break;
		}
		if (skdev->state == SKD_DRVR_STATE_RESTARTING)
			skd_recover_requests(skdev, 0);
		if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
			skdev->timer_countdown = SKD_STARTING_TIMO;
			skdev->state = SKD_DRVR_STATE_STARTING;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_SR_DRIVE_ONLINE:
		skdev->cur_max_queue_depth = skd_max_queue_depth;
		if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
			skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;

		skdev->queue_low_water_mark =
			skdev->cur_max_queue_depth * 2 / 3 + 1;
		if (skdev->queue_low_water_mark < 1)
			skdev->queue_low_water_mark = 1;

		pr_info("(%s): Queue depth limit=%d dev=%d lowat=%d\n",
			skd_name(skdev),
			skdev->cur_max_queue_depth,
			skdev->dev_max_queue_depth, skdev->queue_low_water_mark);

		skd_refresh_device_data(skdev);
		break;

	case FIT_SR_DRIVE_BUSY:
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		skd_quiesce_dev(skdev);
		break;
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		/* set timer for 3 seconds, we'll abort any unfinished
		 * commands after that expires
		 */
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_TIMER_SECONDS(3);
		blk_start_queue(skdev->queue);
		break;
	case FIT_SR_DRIVE_BUSY_ERASE:
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_BUSY_TIMO;
		break;
	case FIT_SR_DRIVE_OFFLINE:
		skdev->state = SKD_DRVR_STATE_IDLE;
		break;
	case FIT_SR_DRIVE_SOFT_RESET:
		switch (skdev->state) {
		case SKD_DRVR_STATE_STARTING:
		case SKD_DRVR_STATE_RESTARTING:
			/* Expected by a caller of skd_soft_reset() */
			break;
		default:
			skdev->state = SKD_DRVR_STATE_RESTARTING;
			break;
		}
		break;
	case FIT_SR_DRIVE_FW_BOOTING:
		pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
		break;

	case FIT_SR_DRIVE_DEGRADED:
	case FIT_SR_PCIE_LINK_DOWN:
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		break;

	case FIT_SR_DRIVE_FAULT:
		skd_drive_fault(skdev);
		skd_recover_requests(skdev, 0);
		blk_start_queue(skdev->queue);
		break;

	/* PCIe bus returned all Fs? */
	case 0xFF:
		pr_info("(%s): state=0x%x sense=0x%x\n",
			skd_name(skdev), state, sense);
		skd_drive_disappeared(skdev);
		skd_recover_requests(skdev, 0);
		blk_start_queue(skdev->queue);
		break;
	default:
		/*
		 * Unknown FW State. Wait for a state we recognize.
		 */
		break;
	}
	pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
	       skd_name(skdev),
	       skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
	       skd_skdev_state_to_str(skdev->state), skdev->state);
}
static void skd_recover_requests(struct skd_device *skdev, int requeue)
{
	int i;

	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq = &skdev->skreq_table[i];

		if (skreq->state == SKD_REQ_STATE_BUSY) {
			skd_log_skreq(skdev, skreq, "recover");

			SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
			SKD_ASSERT(skreq->req != NULL);

			/* Release DMA resources for the request. */
			if (skreq->n_sg > 0)
				skd_postop_sg_list(skdev, skreq);

			if (requeue &&
			    (unsigned long) ++skreq->req->special <
			    SKD_MAX_RETRIES)
				blk_requeue_request(skdev->queue, skreq->req);
			else
				skd_end_request(skdev, skreq, -EIO);

			skreq->req = NULL;

			skreq->state = SKD_REQ_STATE_IDLE;
			skreq->id += SKD_ID_INCR;
		}
		if (i > 0)
			skreq[-1].next = skreq;
		skreq->next = NULL;
	}
	skdev->skreq_free_list = skdev->skreq_table;

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];

		if (skmsg->state == SKD_MSG_STATE_BUSY) {
			skd_log_skmsg(skdev, skmsg, "salvaged");
			SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
			skmsg->state = SKD_MSG_STATE_IDLE;
			skmsg->id += SKD_ID_INCR;
		}
		if (i > 0)
			skmsg[-1].next = skmsg;
		skmsg->next = NULL;
	}
	skdev->skmsg_free_list = skdev->skmsg_table;

	for (i = 0; i < skdev->n_special; i++) {
		struct skd_special_context *skspcl = &skdev->skspcl_table[i];

		/* If orphaned, reclaim it because it has already been reported
		 * to the process as an error (it was just waiting for
		 * a completion that didn't come, and now it will never come)
		 * If busy, change to a state that will cause it to error
		 * out in the wait routine and let it do the normal
		 * reporting and reclaiming
		 */
		if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
			if (skspcl->orphaned) {
				pr_debug("%s:%s:%d orphaned %p\n",
					 skdev->name, __func__, __LINE__,
					 skspcl);
				skd_release_special(skdev, skspcl);
			} else {
				pr_debug("%s:%s:%d not orphaned %p\n",
					 skdev->name, __func__, __LINE__,
					 skspcl);
				skspcl->req.state = SKD_REQ_STATE_ABORTED;
			}
		}
	}
	skdev->skspcl_free_list = skdev->skspcl_table;

	for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
		skdev->timeout_slot[i] = 0;

	skdev->in_flight = 0;
}
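/*
 * Added note: rather than pushing reclaimed entries onto the free lists
 * one at a time, the loops above relink each table in place (entry i-1
 * points at entry i, the last entry terminates the chain) and then aim
 * the free-list head at entry 0, rebuilding each list in a single pass.
 */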
static void skd_isr_msg_from_dev(struct skd_device *skdev)
{
	u32 mfd;
	u32 mtd;
	u32 data;

	mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);

	pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
		 skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);

	/* ignore any mtd that is an ack for something we didn't send */
	if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
		return;

	switch (FIT_MXD_TYPE(mfd)) {
	case FIT_MTD_FITFW_INIT:
		skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);

		if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
			pr_err("(%s): protocol mismatch\n",
			       skdev->name);
			pr_err("(%s): got=%d support=%d\n",
			       skdev->name, skdev->proto_ver,
			       FIT_PROTOCOL_VERSION_1);
			pr_err("(%s): please upgrade driver\n",
			       skdev->name);
			skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
			skd_soft_reset(skdev);
			break;
		}
		mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_GET_CMDQ_DEPTH:
		skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
				   SKD_N_COMPLETION_ENTRY);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_DEPTH:
		SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
		mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_SET_COMPQ_ADDR:
		skd_reset_skcomp(skdev);
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_HOST_ID:
		skdev->connect_time_stamp = get_seconds();
		data = skdev->connect_time_stamp & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
		skdev->drive_jiffies = FIT_MXD_DATA(mfd);
		data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
		mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;
		break;

	case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
		skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
		mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
		SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
		skdev->last_mtd = mtd;

		pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
		       skd_name(skdev),
		       skdev->connect_time_stamp, skdev->drive_jiffies);
		break;

	case FIT_MTD_ARM_QUEUE:
		skdev->last_mtd = 0;
		/*
		 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
		 */
		break;

	default:
		break;
	}
}
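/*
 * Added note: the cases above form a fixed init handshake driven
 * entirely by this handler. Each ack from the device (mfd) triggers the
 * next FIT_MTD_* step: FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
 * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO/HI ->
 * ARM_QUEUE, with last_mtd remembering which ack is expected next and
 * the mismatch check at the top discarding anything out of sequence.
 */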
static void skd_disable_interrupts(struct skd_device *skdev)
{
	u32 sense;

	sense = SKD_READL(skdev, FIT_CONTROL);
	sense &= ~FIT_CR_ENABLE_INTERRUPTS;
	SKD_WRITEL(skdev, sense, FIT_CONTROL);
	pr_debug("%s:%s:%d sense 0x%x\n",
		 skdev->name, __func__, __LINE__, sense);

	/* Note that the 1s is written. A 1-bit means
	 * disable, a 0 means enable.
	 */
	SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
}
static void skd_enable_interrupts(struct skd_device *skdev)
{
	u32 val;

	/* unmask interrupts first */
	val = FIT_ISH_FW_STATE_CHANGE +
	      FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;

	/* Note that the complement of mask is written. A 1-bit means
	 * disable, a 0 means enable. */
	SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
	pr_debug("%s:%s:%d interrupt mask=0x%x\n",
		 skdev->name, __func__, __LINE__, ~val);

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= FIT_CR_ENABLE_INTERRUPTS;
	pr_debug("%s:%s:%d control=0x%x\n",
		 skdev->name, __func__, __LINE__, val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}
/*
 *****************************************************************************
 * START, STOP, RESTART, QUIESCE, UNQUIESCE
 *****************************************************************************
 */

static void skd_soft_reset(struct skd_device *skdev)
{
	u32 val;

	val = SKD_READL(skdev, FIT_CONTROL);
	val |= (FIT_CR_SOFT_RESET);
	pr_debug("%s:%s:%d control=0x%x\n",
		 skdev->name, __func__, __LINE__, val);
	SKD_WRITEL(skdev, val, FIT_CONTROL);
}
static void skd_start_device(struct skd_device *skdev)
{
	unsigned long flags;
	u32 sense;
	u32 state;

	spin_lock_irqsave(&skdev->lock, flags);

	/* ack all ghost interrupts */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

	sense = SKD_READL(skdev, FIT_STATUS);

	pr_debug("%s:%s:%d initial status=0x%x\n",
		 skdev->name, __func__, __LINE__, sense);

	state = sense & FIT_SR_DRIVE_STATE_MASK;
	skdev->drive_state = state;
	skdev->last_mtd = 0;

	skdev->state = SKD_DRVR_STATE_STARTING;
	skdev->timer_countdown = SKD_STARTING_TIMO;

	skd_enable_interrupts(skdev);

	switch (skdev->drive_state) {
	case FIT_SR_DRIVE_OFFLINE:
		pr_err("(%s): Drive offline...\n", skd_name(skdev));
		break;

	case FIT_SR_DRIVE_FW_BOOTING:
		pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
		skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
		break;

	case FIT_SR_DRIVE_BUSY_SANITIZE:
		pr_info("(%s): Start: BUSY_SANITIZE\n",
			skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_BUSY_ERASE:
		pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_INIT:
	case FIT_SR_DRIVE_ONLINE:
		skd_soft_reset(skdev);
		break;

	case FIT_SR_DRIVE_BUSY:
		pr_err("(%s): Drive Busy...\n", skd_name(skdev));
		skdev->state = SKD_DRVR_STATE_BUSY;
		skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
		break;

	case FIT_SR_DRIVE_SOFT_RESET:
		pr_err("(%s) drive soft reset in prog\n",
		       skd_name(skdev));
		break;

	case FIT_SR_DRIVE_FAULT:
		/* Fault state is bad...soft reset won't do it...
		 * Hard reset, maybe, but does it work on device?
		 * For now, just fault so the system doesn't hang.
		 */
		skd_drive_fault(skdev);
		/*start the queue so we can respond with error to requests */
		pr_debug("%s:%s:%d starting %s queue\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case 0xFF:
		/* Most likely the device isn't there or isn't responding
		 * to the BAR1 addresses. */
		skd_drive_disappeared(skdev);
		/*start the queue so we can respond with error to requests */
		pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		pr_err("(%s) Start: unknown state %x\n",
		       skd_name(skdev), skdev->drive_state);
		break;
	}

	state = SKD_READL(skdev, FIT_CONTROL);
	pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
	pr_debug("%s:%s:%d Intr Status=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	state = SKD_READL(skdev, FIT_INT_MASK_HOST);
	pr_debug("%s:%s:%d Intr Mask=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
	pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	state = SKD_READL(skdev, FIT_HW_VERSION);
	pr_debug("%s:%s:%d HW version=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	spin_unlock_irqrestore(&skdev->lock, flags);
}
static void skd_stop_device(struct skd_device *skdev)
{
	unsigned long flags;
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	u32 dev_state;
	int i;

	spin_lock_irqsave(&skdev->lock, flags);

	if (skdev->state != SKD_DRVR_STATE_ONLINE) {
		pr_err("(%s): skd_stop_device not online no sync\n",
		       skd_name(skdev));
		goto stop_out;
	}

	if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
		pr_err("(%s): skd_stop_device no special\n",
		       skd_name(skdev));
		goto stop_out;
	}

	skdev->state = SKD_DRVR_STATE_SYNCING;
	skdev->sync_done = 0;

	skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);

	spin_unlock_irqrestore(&skdev->lock, flags);

	wait_event_interruptible_timeout(skdev->waitq,
					 (skdev->sync_done), (10 * HZ));

	spin_lock_irqsave(&skdev->lock, flags);

	switch (skdev->sync_done) {
	case 0:
		pr_err("(%s): skd_stop_device no sync\n",
		       skd_name(skdev));
		break;
	case 1:
		pr_err("(%s): skd_stop_device sync done\n",
		       skd_name(skdev));
		break;
	default:
		pr_err("(%s): skd_stop_device sync error\n",
		       skd_name(skdev));
	}

stop_out:
	skdev->state = SKD_DRVR_STATE_STOPPING;
	spin_unlock_irqrestore(&skdev->lock, flags);

	skd_kill_timer(skdev);

	spin_lock_irqsave(&skdev->lock, flags);
	skd_disable_interrupts(skdev);

	/* ensure all ints on device are cleared */
	/* soft reset the device to unload with a clean slate */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
	SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);

	spin_unlock_irqrestore(&skdev->lock, flags);

	/* poll every 100ms, 1 second timeout */
	for (i = 0; i < 10; i++) {
		dev_state =
			SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
		if (dev_state == FIT_SR_DRIVE_INIT)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(100));
	}

	if (dev_state != FIT_SR_DRIVE_INIT)
		pr_err("(%s): skd_stop_device state error 0x%02x\n",
		       skd_name(skdev), dev_state);
}
/* assume spinlock is held */
static void skd_restart_device(struct skd_device *skdev)
{
	u32 state;

	/* ack all ghost interrupts */
	SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);

	state = SKD_READL(skdev, FIT_STATUS);

	pr_debug("%s:%s:%d drive status=0x%x\n",
		 skdev->name, __func__, __LINE__, state);

	state &= FIT_SR_DRIVE_STATE_MASK;
	skdev->drive_state = state;
	skdev->last_mtd = 0;

	skdev->state = SKD_DRVR_STATE_RESTARTING;
	skdev->timer_countdown = SKD_RESTARTING_TIMO;

	skd_soft_reset(skdev);
}
/* assume spinlock is held */
static int skd_quiesce_dev(struct skd_device *skdev)
{
	int rc = 0;

	switch (skdev->state) {
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		pr_debug("%s:%s:%d stopping %s queue\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		blk_stop_queue(skdev->queue);
		break;
	case SKD_DRVR_STATE_ONLINE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_RESUMING:
	default:
		rc = -EINVAL;
		pr_debug("%s:%s:%d state [%d] not implemented\n",
			 skdev->name, __func__, __LINE__, skdev->state);
	}
	return rc;
}
/* assume spinlock is held */
static int skd_unquiesce_dev(struct skd_device *skdev)
{
	int prev_driver_state = skdev->state;

	skd_log_skdev(skdev, "unquiesce");
	if (skdev->state == SKD_DRVR_STATE_ONLINE) {
		pr_debug("%s:%s:%d **** device already ONLINE\n",
			 skdev->name, __func__, __LINE__);
		return 0;
	}
	if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
		/*
		 * If there has been a state change to other than
		 * ONLINE, we will rely on controller state change
		 * to come back online and restart the queue.
		 * The BUSY state means that driver is ready to
		 * continue normal processing but waiting for controller
		 * to become available.
		 */
		skdev->state = SKD_DRVR_STATE_BUSY;
		pr_debug("%s:%s:%d drive BUSY state\n",
			 skdev->name, __func__, __LINE__);
		return 0;
	}

	/*
	 * Drive has just come online, driver is either in startup,
	 * paused performing a task, or busy waiting for hardware.
	 */
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		skdev->state = SKD_DRVR_STATE_ONLINE;
		pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
		       skd_name(skdev),
		       skd_skdev_state_to_str(prev_driver_state),
		       prev_driver_state, skd_skdev_state_to_str(skdev->state),
		       skdev->state);
		pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
			 skdev->name, __func__, __LINE__);
		pr_debug("%s:%s:%d starting %s queue\n",
			 skdev->name, __func__, __LINE__, skdev->name);
		pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
		blk_start_queue(skdev->queue);
		skdev->gendisk_on = 1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
			 skdev->name, __func__, __LINE__,
			 skdev->state);
		return -EBUSY;
	}
	return 0;
}
/*
 *****************************************************************************
 * PCIe MSI/MSI-X INTERRUPT HANDLERS
 *****************************************************************************
 */

static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d MSIX = 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
	pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
	       irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d MSIX = 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
	skd_isr_fwstate(skdev);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;
	int flush_enqueued = 0;
	int deferred;

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d MSIX = 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
	deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
					     &flush_enqueued);
	if (flush_enqueued)
		skd_request_fn(skdev->queue);

	if (deferred)
		schedule_work(&skdev->completion_worker);
	else if (!flush_enqueued)
		skd_request_fn(skdev->queue);

	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d MSIX = 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
	skd_isr_msg_from_dev(skdev);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
{
	struct skd_device *skdev = skd_host_data;
	unsigned long flags;

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d MSIX = 0x%x\n",
		 skdev->name, __func__, __LINE__,
		 SKD_READL(skdev, FIT_INT_STATUS_HOST));
	SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
	spin_unlock_irqrestore(&skdev->lock, flags);

	return IRQ_HANDLED;
}
/*
 *****************************************************************************
 * PCIe MSI/MSI-X SETUP
 *****************************************************************************
 */

struct skd_msix_entry {
	char isr_name[30];
};

struct skd_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

#define SKD_MAX_MSIX_COUNT		13
#define SKD_MIN_MSIX_COUNT		7
#define SKD_BASE_MSIX_IRQ		4

static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
	{ "(DMA 0)",	    skd_reserved_isr },
	{ "(DMA 1)",	    skd_reserved_isr },
	{ "(DMA 2)",	    skd_reserved_isr },
	{ "(DMA 3)",	    skd_reserved_isr },
	{ "(State Change)", skd_statec_isr   },
	{ "(COMPL_Q)",	    skd_comp_q	     },
	{ "(MSG)",	    skd_msg_isr	     },
	{ "(Reserved)",	    skd_reserved_isr },
	{ "(Reserved)",	    skd_reserved_isr },
	{ "(Queue Full 0)", skd_qfull_isr    },
	{ "(Queue Full 1)", skd_qfull_isr    },
	{ "(Queue Full 2)", skd_qfull_isr    },
	{ "(Queue Full 3)", skd_qfull_isr    },
};
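/*
 * Added note: the vector layout above appears to be fixed by the device.
 * State-change, completion and device-message traffic get dedicated
 * entries (indices 4, 5 and 6), while the DMA and reserved vectors are
 * only acknowledged and the queue-full vectors just clear their status.
 */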
static int skd_acquire_msix(struct skd_device *skdev)
{
	int i, rc;
	struct pci_dev *pdev = skdev->pdev;

	rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
				   PCI_IRQ_MSIX);
	if (rc < 0) {
		pr_err("(%s): failed to enable MSI-X %d\n",
		       skd_name(skdev), rc);
		goto out;
	}

	skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
				      sizeof(struct skd_msix_entry), GFP_KERNEL);
	if (!skdev->msix_entries) {
		rc = -ENOMEM;
		pr_err("(%s): msix table allocation error\n",
		       skd_name(skdev));
		goto out;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
		struct skd_msix_entry *qentry = &skdev->msix_entries[i];

		snprintf(qentry->isr_name, sizeof(qentry->isr_name),
			 "%s%d-msix %s", DRV_NAME, skdev->devno,
			 msix_entries[i].name);

		rc = devm_request_irq(&skdev->pdev->dev,
				      pci_irq_vector(skdev->pdev, i),
				      msix_entries[i].handler, 0,
				      qentry->isr_name, skdev);
		if (rc) {
			pr_err("(%s): Unable to register(%d) MSI-X "
			       "handler %d: %s\n",
			       skd_name(skdev), rc, i, qentry->isr_name);
			goto msix_out;
		}
	}

	pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
		 skdev->name, __func__, __LINE__,
		 pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
	return 0;

msix_out:
	while (--i >= 0)
		devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
out:
	kfree(skdev->msix_entries);
	skdev->msix_entries = NULL;
	return rc;
}
static int skd_acquire_irq(struct skd_device *skdev)
{
	struct pci_dev *pdev = skdev->pdev;
	unsigned int irq_flag = PCI_IRQ_LEGACY;
	int rc;

	if (skd_isr_type == SKD_IRQ_MSIX) {
		rc = skd_acquire_msix(skdev);
		if (!rc)
			return 0;

		pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
		       skd_name(skdev), rc);
	}

	snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
		 skdev->devno);

	if (skd_isr_type != SKD_IRQ_LEGACY)
		irq_flag |= PCI_IRQ_MSI;
	rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
	if (rc < 0) {
		pr_err("(%s): failed to allocate the MSI interrupt %d\n",
		       skd_name(skdev), rc);
		return rc;
	}

	rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
			      pdev->msi_enabled ? 0 : IRQF_SHARED,
			      skdev->isr_name, skdev);
	if (rc) {
		pci_free_irq_vectors(pdev);
		pr_err("(%s): failed to allocate interrupt %d\n",
		       skd_name(skdev), rc);
		return rc;
	}

	return 0;
}
static void skd_release_irq(struct skd_device *skdev)
{
	struct pci_dev *pdev = skdev->pdev;

	if (skdev->msix_entries) {
		int i;

		for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
			devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
				      skdev);
		}

		kfree(skdev->msix_entries);
		skdev->msix_entries = NULL;
	} else {
		devm_free_irq(&pdev->dev, pdev->irq, skdev);
	}

	pci_free_irq_vectors(pdev);
}
/*
 *****************************************************************************
 * CONSTRUCT
 *****************************************************************************
 */

static int skd_cons_skcomp(struct skd_device *skdev)
{
	int rc = 0;
	struct fit_completion_entry_v1 *skcomp;
	u32 nbytes;

	nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
	nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;

	pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
		 skdev->name, __func__, __LINE__,
		 nbytes, SKD_N_COMPLETION_ENTRY);

	skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
				       &skdev->cq_dma_address);

	if (skcomp == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	skdev->skcomp_table = skcomp;
	skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
							    sizeof(*skcomp) *
							    SKD_N_COMPLETION_ENTRY);

err_out:
	return rc;
}
static int skd_cons_skmsg(struct skd_device *skdev)
{
	int rc = 0;
	u32 i;

	pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
		 skdev->name, __func__, __LINE__,
		 sizeof(struct skd_fitmsg_context),
		 skdev->num_fitmsg_context,
		 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);

	skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
				     *skdev->num_fitmsg_context, GFP_KERNEL);
	if (skdev->skmsg_table == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		skmsg->id = i + SKD_ID_FIT_MSG;

		skmsg->state = SKD_MSG_STATE_IDLE;
		skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
						      SKD_N_FITMSG_BYTES + 64,
						      &skmsg->mb_dma_address);

		if (skmsg->msg_buf == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}

		skmsg->offset = (u32)((u64)skmsg->msg_buf &
				      (~FIT_QCMD_BASE_ADDRESS_MASK));
		skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
		skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
					FIT_QCMD_BASE_ADDRESS_MASK);
		skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
		skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
		memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);

		skmsg->next = &skmsg[1];
	}

	/* Free list is in order starting with the 0th entry. */
	skdev->skmsg_table[i - 1].next = NULL;
	skdev->skmsg_free_list = skdev->skmsg_table;

err_out:
	return rc;
}
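/*
 * Added note: the pointer arithmetic above rounds msg_buf and
 * mb_dma_address up to the next FIT_QCMD_BASE_ADDRESS_MASK boundary
 * (add the complement of the mask, then mask off the low bits); the 64
 * extra bytes in the allocation leave room for that adjustment, and
 * offset records the original misalignment for skd_free_skmsg().
 */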
static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
						  u32 n_sg,
						  dma_addr_t *ret_dma_addr)
{
	struct fit_sg_descriptor *sg_list;
	u32 nbytes;

	nbytes = sizeof(*sg_list) * n_sg;

	sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);

	if (sg_list != NULL) {
		uint64_t dma_address = *ret_dma_addr;
		u32 i;

		memset(sg_list, 0, nbytes);

		for (i = 0; i < n_sg - 1; i++) {
			uint64_t ndp_off;

			ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);

			sg_list[i].next_desc_ptr = dma_address + ndp_off;
		}
		sg_list[i].next_desc_ptr = 0LL;
	}

	return sg_list;
}
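/*
 * Added note: each descriptor's next_desc_ptr is preloaded with the bus
 * address of its successor, so the contiguous array doubles as a
 * hardware-chained SG list; the final descriptor is terminated with a
 * NULL next pointer.
 */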
static int skd_cons_skreq(struct skd_device *skdev)
{
	int rc = 0;
	u32 i;

	pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
		 skdev->name, __func__, __LINE__,
		 sizeof(struct skd_request_context),
		 skdev->num_req_context,
		 sizeof(struct skd_request_context) * skdev->num_req_context);

	skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
				     * skdev->num_req_context, GFP_KERNEL);
	if (skdev->skreq_table == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
		 skdev->name, __func__, __LINE__,
		 skdev->sgs_per_request, sizeof(struct scatterlist),
		 skdev->sgs_per_request * sizeof(struct scatterlist));

	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq;

		skreq = &skdev->skreq_table[i];

		skreq->id = i + SKD_ID_RW_REQUEST;
		skreq->state = SKD_REQ_STATE_IDLE;

		skreq->sg = kzalloc(sizeof(struct scatterlist) *
				    skdev->sgs_per_request, GFP_KERNEL);
		if (skreq->sg == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}
		sg_init_table(skreq->sg, skdev->sgs_per_request);

		skreq->sksg_list = skd_cons_sg_list(skdev,
						    skdev->sgs_per_request,
						    &skreq->sksg_dma_address);

		if (skreq->sksg_list == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}

		skreq->next = &skreq[1];
	}

	/* Free list is in order starting with the 0th entry. */
	skdev->skreq_table[i - 1].next = NULL;
	skdev->skreq_free_list = skdev->skreq_table;

err_out:
	return rc;
}
static int skd_cons_skspcl(struct skd_device *skdev)
{
	int rc = 0;
	u32 i, nbytes;

	pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
		 skdev->name, __func__, __LINE__,
		 sizeof(struct skd_special_context),
		 skdev->n_special,
		 sizeof(struct skd_special_context) * skdev->n_special);

	skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
				      * skdev->n_special, GFP_KERNEL);
	if (skdev->skspcl_table == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < skdev->n_special; i++) {
		struct skd_special_context *skspcl;

		skspcl = &skdev->skspcl_table[i];

		skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
		skspcl->req.state = SKD_REQ_STATE_IDLE;

		skspcl->req.next = &skspcl[1].req;

		nbytes = SKD_N_SPECIAL_FITMSG_BYTES;

		skspcl->msg_buf =
			pci_zalloc_consistent(skdev->pdev, nbytes,
					      &skspcl->mb_dma_address);
		if (skspcl->msg_buf == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}

		skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
					 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
		if (skspcl->req.sg == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}

		skspcl->req.sksg_list = skd_cons_sg_list(skdev,
							 SKD_N_SG_PER_SPECIAL,
							 &skspcl->req.
							 sksg_dma_address);
		if (skspcl->req.sksg_list == NULL) {
			rc = -ENOMEM;
			goto err_out;
		}
	}

	/* Free list is in order starting with the 0th entry. */
	skdev->skspcl_table[i - 1].req.next = NULL;
	skdev->skspcl_free_list = skdev->skspcl_table;

	return rc;

err_out:
	return rc;
}
static int skd_cons_sksb(struct skd_device *skdev)
{
	int rc = 0;
	struct skd_special_context *skspcl;
	u32 nbytes;

	skspcl = &skdev->internal_skspcl;

	skspcl->req.id = 0 + SKD_ID_INTERNAL;
	skspcl->req.state = SKD_REQ_STATE_IDLE;

	nbytes = SKD_N_INTERNAL_BYTES;

	skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
						 &skspcl->db_dma_address);
	if (skspcl->data_buf == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
	skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
						&skspcl->mb_dma_address);
	if (skspcl->msg_buf == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
						 &skspcl->req.sksg_dma_address);
	if (skspcl->req.sksg_list == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	if (!skd_format_internal_skspcl(skdev)) {
		rc = -EINVAL;
		goto err_out;
	}

err_out:
	return rc;
}
static int skd_cons_disk(struct skd_device *skdev)
{
	int rc = 0;
	struct gendisk *disk;
	struct request_queue *q;
	unsigned long flags;

	disk = alloc_disk(SKD_MINORS_PER_DEVICE);
	if (!disk) {
		rc = -ENOMEM;
		goto err_out;
	}

	skdev->disk = disk;
	sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);

	disk->major = skdev->major;
	disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
	disk->fops = &skd_blockdev_ops;
	disk->private_data = skdev;

	q = blk_init_queue(skd_request_fn, &skdev->lock);
	if (!q) {
		rc = -ENOMEM;
		goto err_out;
	}

	skdev->queue = q;
	disk->queue = q;
	q->queuedata = skdev;

	blk_queue_write_cache(q, true, true);
	blk_queue_max_segments(q, skdev->sgs_per_request);
	blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);

	/* set sysfs optimal_io_size to 8K */
	blk_queue_io_opt(q, 8192);

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

	spin_lock_irqsave(&skdev->lock, flags);
	pr_debug("%s:%s:%d stopping %s queue\n",
		 skdev->name, __func__, __LINE__, skdev->name);
	blk_stop_queue(skdev->queue);
	spin_unlock_irqrestore(&skdev->lock, flags);

err_out:
	return rc;
}
#define SKD_N_DEV_TABLE		16u
static u32 skd_next_devno;

static struct skd_device *skd_construct(struct pci_dev *pdev)
{
	struct skd_device *skdev;
	int blk_major = skd_major;
	int rc;

	skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);

	if (!skdev) {
		pr_err(PFX "(%s): memory alloc failure\n",
		       pci_name(pdev));
		return NULL;
	}

	skdev->state = SKD_DRVR_STATE_LOAD;
	skdev->pdev = pdev;
	skdev->devno = skd_next_devno++;
	skdev->major = blk_major;
	sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
	skdev->dev_max_queue_depth = 0;

	skdev->num_req_context = skd_max_queue_depth;
	skdev->num_fitmsg_context = skd_max_queue_depth;
	skdev->n_special = skd_max_pass_thru;
	skdev->cur_max_queue_depth = 1;
	skdev->queue_low_water_mark = 1;
	skdev->proto_ver = 99;
	skdev->sgs_per_request = skd_sgs_per_request;
	skdev->dbg_level = skd_dbg_level;

	atomic_set(&skdev->device_count, 0);

	spin_lock_init(&skdev->lock);

	INIT_WORK(&skdev->completion_worker, skd_completion_worker);

	pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_skcomp(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_skmsg(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_skreq(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_skspcl(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_sksb(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
	rc = skd_cons_disk(skdev);
	if (rc < 0)
		goto err_out;

	pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
	return skdev;

err_out:
	pr_debug("%s:%s:%d construct failed\n",
		 skdev->name, __func__, __LINE__);
	skd_destruct(skdev);
	return NULL;
}
/*
 *****************************************************************************
 * DESTRUCT (FREE)
 *****************************************************************************
 */

static void skd_free_skcomp(struct skd_device *skdev)
{
	if (skdev->skcomp_table != NULL) {
		u32 nbytes;

		nbytes = sizeof(skdev->skcomp_table[0]) *
			 SKD_N_COMPLETION_ENTRY;
		pci_free_consistent(skdev->pdev, nbytes,
				    skdev->skcomp_table, skdev->cq_dma_address);
	}

	skdev->skcomp_table = NULL;
	skdev->cq_dma_address = 0;
}
static void skd_free_skmsg(struct skd_device *skdev)
{
	u32 i;

	if (skdev->skmsg_table == NULL)
		return;

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		if (skmsg->msg_buf != NULL) {
			skmsg->msg_buf += skmsg->offset;
			skmsg->mb_dma_address += skmsg->offset;
			pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
					    skmsg->msg_buf,
					    skmsg->mb_dma_address);
		}
		skmsg->msg_buf = NULL;
		skmsg->mb_dma_address = 0;
	}

	kfree(skdev->skmsg_table);
	skdev->skmsg_table = NULL;
}
static void skd_free_sg_list(struct skd_device *skdev,
			     struct fit_sg_descriptor *sg_list,
			     u32 n_sg, dma_addr_t dma_addr)
{
	if (sg_list != NULL) {
		u32 nbytes;

		nbytes = sizeof(*sg_list) * n_sg;

		pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
	}
}
static void skd_free_skreq(struct skd_device *skdev)
{
	u32 i;

	if (skdev->skreq_table == NULL)
		return;

	for (i = 0; i < skdev->num_req_context; i++) {
		struct skd_request_context *skreq;

		skreq = &skdev->skreq_table[i];

		skd_free_sg_list(skdev, skreq->sksg_list,
				 skdev->sgs_per_request,
				 skreq->sksg_dma_address);

		skreq->sksg_list = NULL;
		skreq->sksg_dma_address = 0;

		kfree(skreq->sg);
	}

	kfree(skdev->skreq_table);
	skdev->skreq_table = NULL;
}
static void skd_free_skspcl(struct skd_device *skdev)
{
	u32 i;
	u32 nbytes;

	if (skdev->skspcl_table == NULL)
		return;

	for (i = 0; i < skdev->n_special; i++) {
		struct skd_special_context *skspcl;

		skspcl = &skdev->skspcl_table[i];

		if (skspcl->msg_buf != NULL) {
			nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
			pci_free_consistent(skdev->pdev, nbytes,
					    skspcl->msg_buf,
					    skspcl->mb_dma_address);
		}

		skspcl->msg_buf = NULL;
		skspcl->mb_dma_address = 0;

		skd_free_sg_list(skdev, skspcl->req.sksg_list,
				 SKD_N_SG_PER_SPECIAL,
				 skspcl->req.sksg_dma_address);

		skspcl->req.sksg_list = NULL;
		skspcl->req.sksg_dma_address = 0;

		kfree(skspcl->req.sg);
	}

	kfree(skdev->skspcl_table);
	skdev->skspcl_table = NULL;
}
static void skd_free_sksb(struct skd_device *skdev)
{
	struct skd_special_context *skspcl;
	u32 nbytes;

	skspcl = &skdev->internal_skspcl;

	if (skspcl->data_buf != NULL) {
		nbytes = SKD_N_INTERNAL_BYTES;

		pci_free_consistent(skdev->pdev, nbytes,
				    skspcl->data_buf, skspcl->db_dma_address);
	}

	skspcl->data_buf = NULL;
	skspcl->db_dma_address = 0;

	if (skspcl->msg_buf != NULL) {
		nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
		pci_free_consistent(skdev->pdev, nbytes,
				    skspcl->msg_buf, skspcl->mb_dma_address);
	}

	skspcl->msg_buf = NULL;
	skspcl->mb_dma_address = 0;

	skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
			 skspcl->req.sksg_dma_address);

	skspcl->req.sksg_list = NULL;
	skspcl->req.sksg_dma_address = 0;
}
static void skd_free_disk(struct skd_device *skdev)
{
	struct gendisk *disk = skdev->disk;

	if (disk != NULL) {
		struct request_queue *q = disk->queue;

		if (disk->flags & GENHD_FL_UP)
			del_gendisk(disk);
		if (q)
			blk_cleanup_queue(q);
		put_disk(disk);
	}
	skdev->disk = NULL;
}
static void skd_destruct(struct skd_device *skdev)
{
	if (skdev == NULL)
		return;

	pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
	skd_free_disk(skdev);

	pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
	skd_free_sksb(skdev);

	pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
	skd_free_skspcl(skdev);

	pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
	skd_free_skreq(skdev);

	pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
	skd_free_skmsg(skdev);

	pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
	skd_free_skcomp(skdev);

	pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
	kfree(skdev);
}
/*
 *****************************************************************************
 * BLOCK DEVICE (BDEV) GLUE
 *****************************************************************************
 */
static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct skd_device *skdev;
	u64 capacity;

	skdev = bdev->bd_disk->private_data;

	pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
		 skdev->name, __func__, __LINE__,
		 bdev->bd_disk->disk_name, current->comm);

	if (skdev->read_cap_is_valid) {
		capacity = get_capacity(skdev->disk);
		geo->heads = 64;
		geo->sectors = 255;
		geo->cylinders = (capacity) / (255 * 64);
		return 0;
	}
	return -EIO;
}
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
	pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
	device_add_disk(parent, skdev->disk);
	return 0;
}
static const struct block_device_operations skd_blockdev_ops = {
	.owner		= THIS_MODULE,
	.ioctl		= skd_bdev_ioctl,
	.getgeo		= skd_bdev_getgeo,
};
/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */
static const struct pci_device_id skd_pci_tbl[] = {
	{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0 }                     /* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
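/*
 * skd_pci_info() formats a human-readable link description such as
 * "PCIe (5.0GT/s 4X)". Offset 0x12 into the PCI Express capability is
 * the Link Status register: bits 3:0 hold the current link speed code
 * (1 = 2.5 GT/s, 2 = 5.0 GT/s) and bits 9:4 the negotiated link width.
 */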
static char *skd_pci_info(struct skd_device *skdev, char *str)
{
	int pcie_reg;

	strcpy(str, "PCIe (");
	pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;
		pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (0xF);
		lwidth = (pcie_lstat & 0x3F0) >> 4;

		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
		strcat(str, lwstr);
	}
	return str;
}
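/*
 * Probe sequence: enable the PCI device, claim its regions, set 64-bit
 * DMA masks (falling back to 32-bit), construct the skd_device, map the
 * BARs, acquire the IRQ, start the timer, then kick the device and wait
 * on skdev->waitq up to SKD_START_WAIT_SECONDS for the firmware to bring
 * the disk on-line. Failures unwind through the err_out_* labels in
 * reverse order of acquisition.
 */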
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i;
	int rc = 0;
	char pci_str[32];
	struct skd_device *skdev;

	pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
		DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
	pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
		pci_name(pdev), pdev->vendor, pdev->device);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
		if (rc < 0)
			goto err_out_regions;
		BUG_ON(!rc);
		skd_major = rc;
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skd_name(skdev), rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	pci_set_drvdata(pdev, skdev);

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       skd_name(skdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(&pdev->dev, skdev);
		rc = 0;
	} else {
		/* we timed out, something is wrong with the device,
		   don't add the disk structure */
		pr_err("(%s): error: waiting for s1120 timed out %d!\n",
		       skd_name(skdev), rc);
		/* in case of no error; we timeout with ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

#ifdef SKD_VMK_POLL_HANDLER
	if (skdev->irq_type == SKD_IRQ_MSIX) {
		/* MSIX completion handler is being used for coredump */
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->msix_entries[5].vector,
						  skd_comp_q, skdev);
	} else {
		vmklnx_scsi_register_poll_handler(skdev->scsi_host,
						  skdev->pdev->irq, skd_isr,
						  skdev);
	}
#endif /* SKD_VMK_POLL_HANDLER */

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}
static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (skdev == NULL) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap((u32 *)skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
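/*
 * Legacy PCI power-management hooks: suspend quiesces the device and
 * releases everything probe acquired (IRQ, BAR mappings, regions)
 * before powering down, and resume re-acquires the same resources in
 * probe order; the gendisk itself persists across the suspend cycle,
 * so resume does not re-attach it.
 */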
static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (skdev == NULL) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -EIO;
	}

	skd_stop_device(skdev);

	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap((u32 *)skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (skdev == NULL) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return -1;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
			pr_err("(%s): consistent DMA mask error %d\n",
			       pci_name(pdev), rc);
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			pr_err("(%s): DMA mask error %d\n",
			       pci_name(pdev), rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skdev->name, rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       pci_name(pdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	pr_err("skd_pci_shutdown called\n");

	skdev = pci_get_drvdata(pdev);
	if (skdev == NULL) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}

	pr_err("%s: calling stop\n", skd_name(skdev));
	skd_stop_device(skdev);
}
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};
/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
static const char *skd_name(struct skd_device *skdev)
{
	memset(skdev->id_str, 0, sizeof(skdev->id_str));

	if (skdev->inquiry_is_valid)
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
			 skdev->name, skdev->inq_serial_num,
			 pci_name(skdev->pdev));
	else
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
			 skdev->name, pci_name(skdev->pdev));

	return skdev->id_str;
}
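/*
 * The *_to_str() helpers below map firmware and driver state enums to
 * fixed strings for the pr_debug tracing in the skd_log_*() routines;
 * unrecognized values fall through to "???" instead of failing.
 */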
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}
const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}
static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
	pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
		 skdev->name, __func__, __LINE__,
		 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		 skd_skdev_state_to_str(skdev->state), skdev->state);
	pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->in_flight, skdev->cur_max_queue_depth,
		 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}
static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
		 skdev->name, __func__, __LINE__,
		 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
		 skmsg->id, skmsg->length);
}
static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		 skdev->name, __func__, __LINE__,
		 skd_skreq_state_to_str(skreq->state), skreq->state,
		 skreq->id, skreq->fitmsg_id);
	pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
		 skdev->name, __func__, __LINE__,
		 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_debug("%s:%s:%d "
			 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count,
			 (int)rq_data_dir(req));
	} else
		pr_debug("%s:%s:%d req=NULL\n",
			 skdev->name, __func__, __LINE__);
}
/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
static int __init skd_init(void)
{
	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	return pci_register_driver(&skd_driver);
}
static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}
module_init(skd_init);
module_exit(skd_exit);
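/*
 * Usage sketch (not part of the driver): assuming the tunables checked
 * in skd_init() are exposed with module_param(), they can be set at
 * load time, e.g.:
 *
 *   modprobe skd skd_max_queue_depth=64 skd_sgs_per_request=256
 *
 * Out-of-range values are not fatal; skd_init() logs the offending
 * value and resets the parameter to its default before registering
 * the PCI driver.
 */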