// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	    Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <linux/genhd.h>
#include <linux/idr.h>

#include "rsxx_priv.h"
#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */

MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
static unsigned int force_legacy = NO_LEGACY;
module_param(force_legacy, uint, 0444);
MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");

static unsigned int sync_start = 1;
module_param(sync_start, uint, 0444);
MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
			     "until the card startup has completed.");

static DEFINE_IDA(rsxx_disk_ida);
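
/*
 * Illustrative usage (not part of the original source): both parameters are
 * 0444, so they are read-only at runtime and are normally set at load time,
 * for example:
 *
 *	modprobe rsxx force_legacy=1 sync_start=0
 *
 * The module name in this example is an assumption based on DRIVER_NAME.
 */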
/* --------------------Debugfs Setup ------------------- */

static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
{
	struct rsxx_cardinfo *card = m->private;

	seq_printf(m, "HWID        0x%08x\n", ioread32(card->regmap + HWID));
	seq_printf(m, "SCRATCH     0x%08x\n", ioread32(card->regmap + SCRATCH));
	seq_printf(m, "IER         0x%08x\n", ioread32(card->regmap + IER));
	seq_printf(m, "IPR         0x%08x\n", ioread32(card->regmap + IPR));
	seq_printf(m, "CREG_CMD    0x%08x\n", ioread32(card->regmap + CREG_CMD));
	seq_printf(m, "CREG_ADD    0x%08x\n", ioread32(card->regmap + CREG_ADD));
	seq_printf(m, "CREG_CNT    0x%08x\n", ioread32(card->regmap + CREG_CNT));
	seq_printf(m, "CREG_STAT   0x%08x\n", ioread32(card->regmap + CREG_STAT));
	seq_printf(m, "CREG_DATA0  0x%08x\n", ioread32(card->regmap + CREG_DATA0));
	seq_printf(m, "CREG_DATA1  0x%08x\n", ioread32(card->regmap + CREG_DATA1));
	seq_printf(m, "CREG_DATA2  0x%08x\n", ioread32(card->regmap + CREG_DATA2));
	seq_printf(m, "CREG_DATA3  0x%08x\n", ioread32(card->regmap + CREG_DATA3));
	seq_printf(m, "CREG_DATA4  0x%08x\n", ioread32(card->regmap + CREG_DATA4));
	seq_printf(m, "CREG_DATA5  0x%08x\n", ioread32(card->regmap + CREG_DATA5));
	seq_printf(m, "CREG_DATA6  0x%08x\n", ioread32(card->regmap + CREG_DATA6));
	seq_printf(m, "CREG_DATA7  0x%08x\n", ioread32(card->regmap + CREG_DATA7));
	seq_printf(m, "INTR_COAL   0x%08x\n", ioread32(card->regmap + INTR_COAL));
	seq_printf(m, "HW_ERROR    0x%08x\n", ioread32(card->regmap + HW_ERROR));
	seq_printf(m, "DEBUG0      0x%08x\n", ioread32(card->regmap + PCI_DEBUG0));
	seq_printf(m, "DEBUG1      0x%08x\n", ioread32(card->regmap + PCI_DEBUG1));
	seq_printf(m, "DEBUG2      0x%08x\n", ioread32(card->regmap + PCI_DEBUG2));
	seq_printf(m, "DEBUG3      0x%08x\n", ioread32(card->regmap + PCI_DEBUG3));
	seq_printf(m, "DEBUG4      0x%08x\n", ioread32(card->regmap + PCI_DEBUG4));
	seq_printf(m, "DEBUG5      0x%08x\n", ioread32(card->regmap + PCI_DEBUG5));
	seq_printf(m, "DEBUG6      0x%08x\n", ioread32(card->regmap + PCI_DEBUG6));
	seq_printf(m, "DEBUG7      0x%08x\n", ioread32(card->regmap + PCI_DEBUG7));
	seq_printf(m, "RECONFIG    0x%08x\n", ioread32(card->regmap + PCI_RECONFIG));

	return 0;
}
static int rsxx_attr_stats_show(struct seq_file *m, void *p)
{
	struct rsxx_cardinfo *card = m->private;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		seq_printf(m, "Ctrl %d CRC Errors       = %d\n",
				i, card->ctrl[i].stats.crc_errors);
		seq_printf(m, "Ctrl %d Hard Errors      = %d\n",
				i, card->ctrl[i].stats.hard_errors);
		seq_printf(m, "Ctrl %d Soft Errors      = %d\n",
				i, card->ctrl[i].stats.soft_errors);
		seq_printf(m, "Ctrl %d Writes Issued    = %d\n",
				i, card->ctrl[i].stats.writes_issued);
		seq_printf(m, "Ctrl %d Writes Failed    = %d\n",
				i, card->ctrl[i].stats.writes_failed);
		seq_printf(m, "Ctrl %d Reads Issued     = %d\n",
				i, card->ctrl[i].stats.reads_issued);
		seq_printf(m, "Ctrl %d Reads Failed     = %d\n",
				i, card->ctrl[i].stats.reads_failed);
		seq_printf(m, "Ctrl %d Reads Retried    = %d\n",
				i, card->ctrl[i].stats.reads_retried);
		seq_printf(m, "Ctrl %d Discards Issued  = %d\n",
				i, card->ctrl[i].stats.discards_issued);
		seq_printf(m, "Ctrl %d Discards Failed  = %d\n",
				i, card->ctrl[i].stats.discards_failed);
		seq_printf(m, "Ctrl %d DMA SW Errors    = %d\n",
				i, card->ctrl[i].stats.dma_sw_err);
		seq_printf(m, "Ctrl %d DMA HW Faults    = %d\n",
				i, card->ctrl[i].stats.dma_hw_fault);
		seq_printf(m, "Ctrl %d DMAs Cancelled   = %d\n",
				i, card->ctrl[i].stats.dma_cancelled);
		seq_printf(m, "Ctrl %d SW Queue Depth   = %d\n",
				i, card->ctrl[i].stats.sw_q_depth);
		seq_printf(m, "Ctrl %d HW Queue Depth   = %d\n",
				i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
	}

	return 0;
}
static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, rsxx_attr_stats_show, inode->i_private);
}

static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
}
static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
	char *buf;
	ssize_t st;

	buf = kzalloc(cnt, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
	if (!st)
		st = copy_to_user(ubuf, buf, cnt);
	kfree(buf);
	if (st)
		return st;
	*ppos += cnt;
	return cnt;
}
static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	struct rsxx_cardinfo *card = file_inode(fp)->i_private;
	char *buf;
	ssize_t st;

	buf = memdup_user(ubuf, cnt);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
	kfree(buf);
	if (st)
		return st;
	*ppos += cnt;
	return cnt;
}
static const struct file_operations debugfs_cram_fops = {
	.owner		= THIS_MODULE,
	.read		= rsxx_cram_read,
	.write		= rsxx_cram_write,
};

static const struct file_operations debugfs_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= rsxx_attr_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations debugfs_pci_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= rsxx_attr_pci_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
{
	struct dentry *debugfs_stats;
	struct dentry *debugfs_pci_regs;
	struct dentry *debugfs_cram;

	card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
	if (IS_ERR_OR_NULL(card->debugfs_dir))
		goto failed_debugfs_dir;

	debugfs_stats = debugfs_create_file("stats", 0444,
					    card->debugfs_dir, card,
					    &debugfs_stats_fops);
	if (IS_ERR_OR_NULL(debugfs_stats))
		goto failed_debugfs_stats;

	debugfs_pci_regs = debugfs_create_file("pci_regs", 0444,
					       card->debugfs_dir, card,
					       &debugfs_pci_regs_fops);
	if (IS_ERR_OR_NULL(debugfs_pci_regs))
		goto failed_debugfs_pci_regs;

	debugfs_cram = debugfs_create_file("cram", 0644,
					   card->debugfs_dir, card,
					   &debugfs_cram_fops);
	if (IS_ERR_OR_NULL(debugfs_cram))
		goto failed_debugfs_cram;

	return;

failed_debugfs_cram:
	debugfs_remove(debugfs_pci_regs);
failed_debugfs_pci_regs:
	debugfs_remove(debugfs_stats);
failed_debugfs_stats:
	debugfs_remove(card->debugfs_dir);
failed_debugfs_dir:
	card->debugfs_dir = NULL;
}
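
/*
 * For reference (illustrative, not from the original source): with debugfs
 * mounted (commonly at /sys/kernel/debug), the function above exposes three
 * per-card files under a directory named after the block device, e.g.:
 *
 *	<debugfs>/<disk_name>/stats     (0444) - per-controller I/O statistics
 *	<debugfs>/<disk_name>/pci_regs  (0444) - snapshot of the register map
 *	<debugfs>/<disk_name>/cram      (0644) - read/write window into CRAM
 *
 * The actual directory name comes from card->gendisk->disk_name.
 */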
/*----------------- Interrupt Control & Handling -------------------*/

static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
{
	card->isr_mask = 0;
	card->ier_mask = 0;
}

static void __enable_intr(unsigned int *mask, unsigned int intr)
{
	*mask |= intr;
}

static void __disable_intr(unsigned int *mask, unsigned int intr)
{
	*mask &= ~intr;
}

/*
 * NOTE: Disabling the IER will disable the hardware interrupt.
 * Disabling the ISR will disable the software handling of the ISR bit.
 *
 * Enable/Disable interrupt functions assume the card->irq_lock
 * is held by the caller.
 */
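
/*
 * Illustrative calling pattern (a sketch based on the callers later in this
 * file; "flags" is a local unsigned long in the caller):
 *
 *	spin_lock_irqsave(&card->irq_lock, flags);
 *	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
 *	spin_unlock_irqrestore(&card->irq_lock, flags);
 */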
void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
	if (unlikely(card->halt) ||
	    unlikely(card->eeh_state))
		return;

	__enable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
	if (unlikely(card->eeh_state))
		return;

	__disable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
			     unsigned int intr)
{
	if (unlikely(card->halt) ||
	    unlikely(card->eeh_state))
		return;

	__enable_intr(&card->isr_mask, intr);
	__enable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}

void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
			      unsigned int intr)
{
	if (unlikely(card->eeh_state))
		return;

	__disable_intr(&card->isr_mask, intr);
	__disable_intr(&card->ier_mask, intr);
	iowrite32(card->ier_mask, card->regmap + IER);
}
static irqreturn_t rsxx_isr(int irq, void *pdata)
{
	struct rsxx_cardinfo *card = pdata;
	unsigned int isr;
	int handled = 0;
	int reread_isr;
	int i;

	spin_lock(&card->irq_lock);

	do {
		reread_isr = 0;

		if (unlikely(card->eeh_state))
			break;

		isr = ioread32(card->regmap + ISR);
		if (isr == 0xffffffff) {
			/*
			 * A few systems seem to have an intermittent issue
			 * where PCI reads return all Fs, but retrying the read
			 * a little later will return as expected.
			 */
			dev_info(CARD_TO_DEV(card),
				"ISR = 0xFFFFFFFF, retrying later\n");
			break;
		}

		isr &= card->isr_mask;
		if (!isr)
			break;

		for (i = 0; i < card->n_targets; i++) {
			if (isr & CR_INTR_DMA(i)) {
				if (card->ier_mask & CR_INTR_DMA(i)) {
					rsxx_disable_ier(card, CR_INTR_DMA(i));
					reread_isr = 1;
				}
				queue_work(card->ctrl[i].done_wq,
					   &card->ctrl[i].dma_done_work);
				handled++;
			}
		}

		if (isr & CR_INTR_CREG) {
			queue_work(card->creg_ctrl.creg_wq,
				   &card->creg_ctrl.done_work);
			handled++;
		}

		if (isr & CR_INTR_EVENT) {
			queue_work(card->event_wq, &card->event_work);
			rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
			handled++;
		}
	} while (reread_isr);

	spin_unlock(&card->irq_lock);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
/*----------------- Card Event Handler -------------------*/
static const char *rsxx_card_state_to_str(unsigned int state)
{
	static const char * const state_strings[] = {
		"Unknown", "Shutdown", "Starting", "Formatting",
		"Uninitialized", "Good", "Shutting Down",
		"Fault", "Read Only Fault", "dStroying"
	};

	return state_strings[ffs(state)];
}
static void card_state_change(struct rsxx_cardinfo *card,
			      unsigned int new_state)
{
	int st;

	dev_info(CARD_TO_DEV(card),
		"card state change detected.(%s -> %s)\n",
		rsxx_card_state_to_str(card->state),
		rsxx_card_state_to_str(new_state));

	card->state = new_state;

	/* Don't attach DMA interfaces if the card has an invalid config */
	if (!card->config_valid)
		return;

	switch (new_state) {
	case CARD_STATE_RD_ONLY_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware has entered read-only mode!\n");
		/*
		 * Fall through so the DMA devices can be attached and
		 * the user can attempt to pull off their data.
		 */
		fallthrough;
	case CARD_STATE_GOOD:
		st = rsxx_get_card_size8(card, &card->size8);
		if (st)
			dev_err(CARD_TO_DEV(card),
				"Failed attaching DMA devices\n");

		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		break;

	case CARD_STATE_FAULT:
		dev_crit(CARD_TO_DEV(card),
			"Hardware Fault reported!\n");
		fallthrough;

	/* Everything else, detach DMA interface if it's attached. */
	case CARD_STATE_SHUTDOWN:
	case CARD_STATE_STARTING:
	case CARD_STATE_FORMATTING:
	case CARD_STATE_UNINITIALIZED:
	case CARD_STATE_SHUTTING_DOWN:
	/*
	 * dStroy is a term coined by marketing to represent the low level
	 * secure erase.
	 */
	case CARD_STATE_DSTROYING:
		set_capacity(card->gendisk, 0);
		break;
	}
}
static void card_event_handler(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	unsigned int state;
	unsigned long flags;
	int st;

	card = container_of(work, struct rsxx_cardinfo, event_work);

	if (unlikely(card->halt))
		return;

	/*
	 * Enable the interrupt now to avoid any weird race conditions where a
	 * state change might occur while rsxx_get_card_state() is
	 * processing a returned creg cmd.
	 */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	st = rsxx_get_card_state(card, &state);
	if (st) {
		dev_info(CARD_TO_DEV(card),
			"Failed reading state after event.\n");
		return;
	}

	if (card->state != state)
		card_state_change(card, state);

	if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
/*----------------- Card Operations -------------------*/
static int card_shutdown(struct rsxx_cardinfo *card)
{
	unsigned int state;
	signed long start;
	const int timeout = msecs_to_jiffies(120000);
	int st;

	/* We can't issue a shutdown if the card is in a transition state */
	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state == CARD_STATE_STARTING &&
		 (jiffies - start < timeout));

	if (state == CARD_STATE_STARTING)
		return -ETIMEDOUT;

	/* Only issue a shutdown if we need to */
	if ((state != CARD_STATE_SHUTTING_DOWN) &&
	    (state != CARD_STATE_SHUTDOWN)) {
		st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
		if (st)
			return st;
	}

	start = jiffies;
	do {
		st = rsxx_get_card_state(card, &state);
		if (st)
			return st;
	} while (state != CARD_STATE_SHUTDOWN &&
		 (jiffies - start < timeout));

	if (state != CARD_STATE_SHUTDOWN)
		return -ETIMEDOUT;

	return 0;
}
static int rsxx_eeh_frozen(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	int i;
	int st;

	dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");

	card->eeh_state = 1;
	rsxx_mask_interrupts(card);

	/*
	 * We need to guarantee that the write for eeh_state and masking
	 * interrupts does not become reordered. This will prevent a possible
	 * race condition with the EEH code.
	 */
	wmb();

	pci_disable_device(dev);

	st = rsxx_eeh_save_issued_dmas(card);
	if (st)
		return st;

	rsxx_eeh_save_issued_creg(card);

	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
					    card->ctrl[i].status.buf,
					    card->ctrl[i].status.dma_addr);
		if (card->ctrl[i].cmd.buf)
			pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
					    card->ctrl[i].cmd.buf,
					    card->ctrl[i].cmd.dma_addr);
	}

	return 0;
}
static void rsxx_eeh_failure(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	int i;
	int cnt;

	dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");

	card->eeh_state = 1;
	card->halt = 1;

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_bh(&card->ctrl[i].queue_lock);
		cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
					     &card->ctrl[i].queue,
					     COMPLETE_DMA);
		spin_unlock_bh(&card->ctrl[i].queue_lock);

		cnt += rsxx_dma_cancel(&card->ctrl[i]);

		if (cnt)
			dev_info(CARD_TO_DEV(card),
				"Freed %d queued DMAs on channel %d\n",
				cnt, card->ctrl[i].id);
	}
}
static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
{
	unsigned int status;
	int iter = 0;

	/* We need to wait for the hardware to reset */
	while (iter++ < 10) {
		status = ioread32(card->regmap + PCI_RECONFIG);

		if (status & RSXX_FLUSH_BUSY) {
			ssleep(1);
			continue;
		}

		if (status & RSXX_FLUSH_TIMEOUT)
			dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
		return 0;
	}

	/* Hardware failed resetting itself. */
	return -ENODEV;
}
static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
					    enum pci_channel_state error)
{
	int st;

	if (dev->revision < RSXX_EEH_SUPPORT)
		return PCI_ERS_RESULT_NONE;

	if (error == pci_channel_io_perm_failure) {
		rsxx_eeh_failure(dev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	st = rsxx_eeh_frozen(dev);
	if (st) {
		dev_err(&dev->dev, "Slot reset setup failed\n");
		rsxx_eeh_failure(dev);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int i;
	int st;

	dev_warn(&dev->dev,
		"IBM Flash Adapter PCI: recovering from slot reset.\n");

	st = pci_enable_device(dev);
	if (st)
		goto failed_hw_setup;

	pci_set_master(dev);

	st = rsxx_eeh_fifo_flush_poll(card);
	if (st)
		goto failed_hw_setup;

	rsxx_dma_queue_reset(card);

	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
		if (st)
			goto failed_hw_buffers_init;
	}

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Clears the ISR register from spurious interrupts */
	st = ioread32(card->regmap + ISR);

	card->eeh_state = 0;

	spin_lock_irqsave(&card->irq_lock, flags);
	if (card->n_targets & RSXX_MAX_TARGETS)
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
	else
		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	rsxx_kick_creg_queue(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock(&card->ctrl[i].queue_lock);
		if (list_empty(&card->ctrl[i].queue)) {
			spin_unlock(&card->ctrl[i].queue_lock);
			continue;
		}
		spin_unlock(&card->ctrl[i].queue_lock);

		queue_work(card->ctrl[i].issue_wq,
			   &card->ctrl[i].issue_dma_work);
	}

	dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");

	return PCI_ERS_RESULT_RECOVERED;

failed_hw_buffers_init:
	for (i = 0; i < card->n_targets; i++) {
		if (card->ctrl[i].status.buf)
			pci_free_consistent(card->dev,
					STATUS_BUFFER_SIZE8,
					card->ctrl[i].status.buf,
					card->ctrl[i].status.dma_addr);
		if (card->ctrl[i].cmd.buf)
			pci_free_consistent(card->dev,
					COMMAND_BUFFER_SIZE8,
					card->ctrl[i].cmd.buf,
					card->ctrl[i].cmd.dma_addr);
	}
failed_hw_setup:
	rsxx_eeh_failure(dev);
	return PCI_ERS_RESULT_DISCONNECT;
}
/*----------------- Driver Initialization & Setup -------------------*/

/* Returns:   0 if the driver is compatible with the device
	     -1 if the driver is NOT compatible with the device */
static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
{
	unsigned char pci_rev;

	pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);

	if (pci_rev > RS70_PCI_REV_SUPPORTED)
		return -1;
	return 0;
}
*dev
,
745 const struct pci_device_id
*id
)
747 struct rsxx_cardinfo
*card
;
749 unsigned int sync_timeout
;
751 dev_info(&dev
->dev
, "PCI-Flash SSD discovered\n");
753 card
= kzalloc(sizeof(*card
), GFP_KERNEL
);
758 pci_set_drvdata(dev
, card
);
760 st
= ida_alloc(&rsxx_disk_ida
, GFP_KERNEL
);
765 st
= pci_enable_device(dev
);
771 st
= dma_set_mask(&dev
->dev
, DMA_BIT_MASK(64));
773 dev_err(CARD_TO_DEV(card
),
774 "No usable DMA configuration,aborting\n");
775 goto failed_dma_mask
;
778 st
= pci_request_regions(dev
, DRIVER_NAME
);
780 dev_err(CARD_TO_DEV(card
),
781 "Failed to request memory region\n");
782 goto failed_request_regions
;
785 if (pci_resource_len(dev
, 0) == 0) {
786 dev_err(CARD_TO_DEV(card
), "BAR0 has length 0!\n");
791 card
->regmap
= pci_iomap(dev
, 0, 0);
793 dev_err(CARD_TO_DEV(card
), "Failed to map BAR0\n");
798 spin_lock_init(&card
->irq_lock
);
802 spin_lock_irq(&card
->irq_lock
);
803 rsxx_disable_ier_and_isr(card
, CR_INTR_ALL
);
804 spin_unlock_irq(&card
->irq_lock
);
807 st
= pci_enable_msi(dev
);
809 dev_warn(CARD_TO_DEV(card
),
810 "Failed to enable MSI\n");
813 st
= request_irq(dev
->irq
, rsxx_isr
, IRQF_SHARED
,
816 dev_err(CARD_TO_DEV(card
),
817 "Failed requesting IRQ%d\n", dev
->irq
);
821 /************* Setup Processor Command Interface *************/
822 st
= rsxx_creg_setup(card
);
824 dev_err(CARD_TO_DEV(card
), "Failed to setup creg interface.\n");
825 goto failed_creg_setup
;
828 spin_lock_irq(&card
->irq_lock
);
829 rsxx_enable_ier_and_isr(card
, CR_INTR_CREG
);
830 spin_unlock_irq(&card
->irq_lock
);
832 st
= rsxx_compatibility_check(card
);
834 dev_warn(CARD_TO_DEV(card
),
835 "Incompatible driver detected. Please update the driver.\n");
837 goto failed_compatiblity_check
;
840 /************* Load Card Config *************/
841 st
= rsxx_load_config(card
);
843 dev_err(CARD_TO_DEV(card
),
844 "Failed loading card config\n");
846 /************* Setup DMA Engine *************/
847 st
= rsxx_get_num_targets(card
, &card
->n_targets
);
849 dev_info(CARD_TO_DEV(card
),
850 "Failed reading the number of DMA targets\n");
852 card
->ctrl
= kcalloc(card
->n_targets
, sizeof(*card
->ctrl
),
856 goto failed_dma_setup
;
859 st
= rsxx_dma_setup(card
);
861 dev_info(CARD_TO_DEV(card
),
862 "Failed to setup DMA engine\n");
863 goto failed_dma_setup
;
866 /************* Setup Card Event Handler *************/
867 card
->event_wq
= create_singlethread_workqueue(DRIVER_NAME
"_event");
868 if (!card
->event_wq
) {
869 dev_err(CARD_TO_DEV(card
), "Failed card event setup.\n");
870 goto failed_event_handler
;
873 INIT_WORK(&card
->event_work
, card_event_handler
);
875 st
= rsxx_setup_dev(card
);
877 goto failed_create_dev
;
879 rsxx_get_card_state(card
, &card
->state
);
881 dev_info(CARD_TO_DEV(card
),
883 rsxx_card_state_to_str(card
->state
));
886 * Now that the DMA Engine and devices have been setup,
887 * we can enable the event interrupt(it kicks off actions in
888 * those layers so we couldn't enable it right away.)
890 spin_lock_irq(&card
->irq_lock
);
891 rsxx_enable_ier_and_isr(card
, CR_INTR_EVENT
);
892 spin_unlock_irq(&card
->irq_lock
);
894 if (card
->state
== CARD_STATE_SHUTDOWN
) {
895 st
= rsxx_issue_card_cmd(card
, CARD_CMD_STARTUP
);
897 dev_crit(CARD_TO_DEV(card
),
898 "Failed issuing card startup\n");
900 sync_timeout
= SYNC_START_TIMEOUT
;
902 dev_info(CARD_TO_DEV(card
),
903 "Waiting for card to startup\n");
909 rsxx_get_card_state(card
, &card
->state
);
910 } while (sync_timeout
&&
911 (card
->state
== CARD_STATE_STARTING
));
913 if (card
->state
== CARD_STATE_STARTING
) {
914 dev_warn(CARD_TO_DEV(card
),
915 "Card startup timed out\n");
918 dev_info(CARD_TO_DEV(card
),
920 rsxx_card_state_to_str(card
->state
));
921 st
= rsxx_get_card_size8(card
, &card
->size8
);
926 } else if (card
->state
== CARD_STATE_GOOD
||
927 card
->state
== CARD_STATE_RD_ONLY_FAULT
) {
928 st
= rsxx_get_card_size8(card
, &card
->size8
);
933 rsxx_attach_dev(card
);
935 /************* Setup Debugfs *************/
936 rsxx_debugfs_dev_new(card
);
941 destroy_workqueue(card
->event_wq
);
942 card
->event_wq
= NULL
;
943 failed_event_handler
:
944 rsxx_dma_destroy(card
);
946 failed_compatiblity_check
:
947 destroy_workqueue(card
->creg_ctrl
.creg_wq
);
948 card
->creg_ctrl
.creg_wq
= NULL
;
950 spin_lock_irq(&card
->irq_lock
);
951 rsxx_disable_ier_and_isr(card
, CR_INTR_ALL
);
952 spin_unlock_irq(&card
->irq_lock
);
953 free_irq(dev
->irq
, card
);
955 pci_disable_msi(dev
);
957 pci_iounmap(dev
, card
->regmap
);
959 pci_release_regions(dev
);
960 failed_request_regions
:
962 pci_disable_device(dev
);
964 ida_free(&rsxx_disk_ida
, card
->disk_id
);
static void rsxx_pci_remove(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int st;
	int i;

	if (!card)
		return;

	dev_info(CARD_TO_DEV(card),
		"Removing PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	st = card_shutdown(card);
	if (st)
		dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");

	/* Sync outstanding event handlers. */
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	cancel_work_sync(&card->event_work);

	destroy_workqueue(card->event_wq);
	rsxx_destroy_dev(card);
	rsxx_dma_destroy(card);
	destroy_workqueue(card->creg_ctrl.creg_wq);

	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	/* Prevent work_structs from re-queuing themselves. */
	card->halt = 1;

	debugfs_remove_recursive(card->debugfs_dir);

	free_irq(dev->irq, card);

	if (!force_legacy)
		pci_disable_msi(dev);

	rsxx_creg_destroy(card);
	pci_iounmap(dev, card->regmap);

	pci_disable_device(dev);
	pci_release_regions(dev);

	ida_free(&rsxx_disk_ida, card->disk_id);
	kfree(card);
}
static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	/* We don't support suspend at this time. */
	return -ENOSYS;
}
static void rsxx_pci_shutdown(struct pci_dev *dev)
{
	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
	unsigned long flags;
	int i;

	if (!card)
		return;

	dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");

	rsxx_detach_dev(card);

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	card_shutdown(card);
}
static const struct pci_error_handlers rsxx_err_handler = {
	.error_detected	= rsxx_error_detected,
	.slot_reset	= rsxx_slot_reset,
};
static const struct pci_device_id rsxx_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
	{0,},
};

MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
static struct pci_driver rsxx_pci_driver = {
	.name		= DRIVER_NAME,
	.id_table	= rsxx_pci_ids,
	.probe		= rsxx_pci_probe,
	.remove		= rsxx_pci_remove,
	.suspend	= rsxx_pci_suspend,
	.shutdown	= rsxx_pci_shutdown,
	.err_handler	= &rsxx_err_handler,
};
static int __init rsxx_core_init(void)
{
	int st;

	st = rsxx_dev_init();
	if (st)
		return st;

	st = rsxx_dma_init();
	if (st)
		goto dma_init_failed;

	st = rsxx_creg_init();
	if (st)
		goto creg_init_failed;

	return pci_register_driver(&rsxx_pci_driver);

creg_init_failed:
	rsxx_dma_cleanup();
dma_init_failed:
	rsxx_dev_cleanup();

	return st;
}

static void __exit rsxx_core_cleanup(void)
{
	pci_unregister_driver(&rsxx_pci_driver);
	rsxx_creg_cleanup();
	rsxx_dma_cleanup();
	rsxx_dev_cleanup();
}

module_init(rsxx_core_init);
module_exit(rsxx_core_cleanup);