// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/bio.h>

#include "rsxx_priv.h"

static unsigned int blkdev_minors = 64;
module_param(blkdev_minors, uint, 0444);
MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)");

/*
 * For now I'm making this tweakable in case any applications hit this limit.
 * If you see a "bio too big" error in the log you will need to raise this
 * value.
 */
static unsigned int blkdev_max_hw_sectors = 1024;
module_param(blkdev_max_hw_sectors, uint, 0444);
MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");

static unsigned int enable_blkdev = 1;
module_param(enable_blkdev, uint, 0444);
MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");

struct rsxx_bio_meta {
	struct bio	*bio;
	atomic_t	pending_dmas;
	atomic_t	error;
	unsigned long	start_time;
};

static struct kmem_cache *bio_meta_pool;

/*----------------- Block Device Operations -----------------*/
static int rsxx_blkdev_ioctl(struct block_device *bdev,
				 fmode_t mode,
				 unsigned int cmd,
				 unsigned long arg)
{
	struct rsxx_cardinfo *card = bdev->bd_disk->private_data;

	switch (cmd) {
	case RSXX_GETREG:	/* register-access ioctls from rsxx_cfg.h */
		return rsxx_reg_access(card, (void __user *)arg, 1);
	case RSXX_SETREG:
		return rsxx_reg_access(card, (void __user *)arg, 0);
	}

	return -ENOTTY;
}

static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
	u64 blocks = card->size8 >> 9;

	/*
	 * get geometry: Fake it. I haven't found any drivers that set
	 * geo->start, so we won't either.
	 */
	if (card->size8) {
		geo->heads = 64;
		geo->sectors = 16;
		do_div(blocks, (geo->heads * geo->sectors));
		geo->cylinders = blocks;
	} else {
		geo->heads = 0;
		geo->sectors = 0;
		geo->cylinders = 0;
	}
	return 0;
}

static const struct block_device_operations rsxx_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= rsxx_getgeo,
	.ioctl		= rsxx_blkdev_ioctl,
};
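
/*
 * I/O accounting: disk_stats_start() is called when a bio is accepted by
 * rsxx_make_request() and disk_stats_complete() when the bio's last
 * outstanding DMA finishes, so the accounted time covers the whole life
 * of the bio.
 */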
static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
{
	generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
			      &card->gendisk->part0);
}

static void disk_stats_complete(struct rsxx_cardinfo *card,
				struct bio *bio,
				unsigned long start_time)
{
	generic_end_io_acct(card->queue, bio_op(bio),
			    &card->gendisk->part0, start_time);
}
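
/*
 * Completion callback for each DMA issued on behalf of a bio. The last DMA
 * to finish (pending_dmas dropping to zero) completes the bio, reporting an
 * error if any of its DMAs failed, and frees the rsxx_bio_meta.
 */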
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
			    void *cb_data,
			    unsigned int error)
{
	struct rsxx_bio_meta *meta = cb_data;

	if (unlikely(error))
		atomic_set(&meta->error, 1);

	if (atomic_dec_and_test(&meta->pending_dmas)) {
		if (!card->eeh_state && card->gendisk)
			disk_stats_complete(card, meta->bio, meta->start_time);

		if (atomic_read(&meta->error))
			bio_io_error(meta->bio);
		else
			bio_endio(meta->bio);

		kmem_cache_free(bio_meta_pool, meta);
	}
}
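
/*
 * make_request entry point: the driver is bio-based, so each bio is handed
 * directly to the DMA layer via rsxx_dma_queue_bio(), with bio_dma_done_cb()
 * as the per-DMA completion callback.
 */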
static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
{
	struct rsxx_cardinfo *card = q->queuedata;
	struct rsxx_bio_meta *bio_meta;
	blk_status_t st = BLK_STS_IOERR;

	blk_queue_split(q, &bio);

	might_sleep();

	if (!card)
		goto req_err;

	if (bio_end_sector(bio) > get_capacity(card->gendisk))
		goto req_err;

	if (unlikely(card->halt))
		goto req_err;

	if (unlikely(card->dma_fault))
		goto req_err;

	if (bio->bi_iter.bi_size == 0) {
		dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
		goto req_err;
	}

	bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
	if (!bio_meta) {
		st = BLK_STS_RESOURCE;
		goto req_err;
	}

	bio_meta->bio = bio;
	atomic_set(&bio_meta->error, 0);
	atomic_set(&bio_meta->pending_dmas, 0);
	bio_meta->start_time = jiffies;

	if (!unlikely(card->halt))
		disk_stats_start(card, bio);

	dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
		bio_data_dir(bio) ? 'W' : 'R', bio_meta,
		(u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);

	st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
				bio_dma_done_cb, bio_meta);
	if (st)
		goto queue_err;

	return BLK_QC_T_NONE;

queue_err:
	kmem_cache_free(bio_meta_pool, bio_meta);
req_err:
	if (st)
		bio->bi_status = st;
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

/*----------------- Device Setup -------------------*/
static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
{
	unsigned char pci_rev;

	pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);

	return (pci_rev >= RSXX_DISCARD_SUPPORT);
}

int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
	mutex_lock(&card->dev_lock);

	/* The block device requires the stripe size from the config. */
	if (enable_blkdev) {
		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		else
			set_capacity(card->gendisk, 0);
		device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
		card->bdev_attached = 1;
	}

	mutex_unlock(&card->dev_lock);

	return 0;
}

void rsxx_detach_dev(struct rsxx_cardinfo *card)
{
	mutex_lock(&card->dev_lock);

	if (card->bdev_attached) {
		del_gendisk(card->gendisk);
		card->bdev_attached = 0;
	}

	mutex_unlock(&card->dev_lock);
}
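
/*
 * One-time block layer setup for a card: register a major number, allocate
 * the request queue and gendisk, and apply the queue limits (block sizes,
 * max hw sectors, discard support). The disk itself is added later by
 * rsxx_attach_dev().
 */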
int rsxx_setup_dev(struct rsxx_cardinfo *card)
{
	unsigned short blk_size;

	mutex_init(&card->dev_lock);

	if (!enable_blkdev)
		return 0;

	card->major = register_blkdev(0, DRIVER_NAME);
	if (card->major < 0) {
		dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
		return -ENOMEM;
	}

	card->queue = blk_alloc_queue(GFP_KERNEL);
	if (!card->queue) {
		dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
		unregister_blkdev(card->major, DRIVER_NAME);
		return -ENOMEM;
	}

	card->gendisk = alloc_disk(blkdev_minors);
	if (!card->gendisk) {
		dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
		blk_cleanup_queue(card->queue);
		unregister_blkdev(card->major, DRIVER_NAME);
		return -ENOMEM;
	}

	if (card->config_valid) {
		blk_size = card->config.data.block_size;
		blk_queue_dma_alignment(card->queue, blk_size - 1);
		blk_queue_logical_block_size(card->queue, blk_size);
	}

	blk_queue_make_request(card->queue, rsxx_make_request);
	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
	if (rsxx_discard_supported(card)) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
		blk_queue_max_discard_sectors(card->queue,
					      RSXX_HW_BLK_SIZE >> 9);
		card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
		card->queue->limits.discard_alignment   = RSXX_HW_BLK_SIZE;
	}

	card->queue->queuedata = card;

	snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
		 "rsxx%d", card->disk_id);
	card->gendisk->major = card->major;
	card->gendisk->first_minor = 0;
	card->gendisk->fops = &rsxx_fops;
	card->gendisk->private_data = card;
	card->gendisk->queue = card->queue;

	return 0;
}
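
/*
 * Tear down in the reverse order of rsxx_setup_dev(): release the gendisk,
 * clean up the queue, then give back the major number.
 */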
void rsxx_destroy_dev(struct rsxx_cardinfo *card)
{
	if (!enable_blkdev)
		return;

	put_disk(card->gendisk);
	card->gendisk = NULL;

	blk_cleanup_queue(card->queue);
	card->queue->queuedata = NULL;
	unregister_blkdev(card->major, DRIVER_NAME);
}

int rsxx_dev_init(void)
{
	bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
	if (!bio_meta_pool)
		return -ENOMEM;

	return 0;
}

void rsxx_dev_cleanup(void)
{
	kmem_cache_destroy(bio_meta_pool);
}