/* Copyright (c) 2013 Coraid, Inc. See COPYING for GPL terms. */
/*
 * block device routines
 */
#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blk-mq.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <scsi/sg.h>
#include "aoe.h"
static DEFINE_MUTEX(aoeblk_mutex);
static struct kmem_cache *buf_pool_cache;
static struct dentry *aoe_debugfs_dir;

/* GPFS needs a larger value than the default. */
static int aoe_maxsectors;
module_param(aoe_maxsectors, int, 0644);
MODULE_PARM_DESC(aoe_maxsectors,
        "When nonzero, set the maximum number of sectors per I/O request");
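/*
 * Assumed usage example: load with "modprobe aoe aoe_maxsectors=1024",
 * or change the value at runtime through
 * /sys/module/aoe/parameters/aoe_maxsectors (writable, mode 0644 above).
 */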
static ssize_t aoedisk_show_state(struct device *dev,
                                  struct device_attribute *attr, char *page)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct aoedev *d = disk->private_data;

        return snprintf(page, PAGE_SIZE,
                        "%s%s\n",
                        (d->flags & DEVFL_UP) ? "up" : "down",
                        (d->flags & DEVFL_KICKME) ? ",kickme" :
                        (d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
        /* I'd rather see nopen exported so we can ditch closewait */
}
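/* Report the MAC address of the first target, or "none" if no target is known yet. */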
static ssize_t aoedisk_show_mac(struct device *dev,
                                struct device_attribute *attr, char *page)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct aoedev *d = disk->private_data;
        struct aoetgt *t = d->targets[0];

        if (t == NULL)
                return snprintf(page, PAGE_SIZE, "none\n");
        return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
}
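/* List the network interfaces currently used to reach any of this device's targets. */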
static ssize_t aoedisk_show_netif(struct device *dev,
                                  struct device_attribute *attr, char *page)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct aoedev *d = disk->private_data;
        struct net_device *nds[8], **nd, **nnd, **ne;
        struct aoetgt **t, **te;
        struct aoeif *ifp, *e;
        char *p;

        memset(nds, 0, sizeof nds);
        nd = nds;
        ne = nd + ARRAY_SIZE(nds);
        t = d->targets;
        te = t + d->ntargets;
        for (; t < te && *t; t++) {
                ifp = (*t)->ifs;
                e = ifp + NAOEIFS;
                for (; ifp < e && ifp->nd; ifp++) {
                        for (nnd = nds; nnd < nd; nnd++)
                                if (*nnd == ifp->nd)
                                        break;
                        if (nnd == nd && nd != ne)
                                *nd++ = ifp->nd;
                }
        }

        ne = nd;
        nd = nds;
        if (*nd == NULL)
                return snprintf(page, PAGE_SIZE, "none\n");
        for (p = page; nd < ne; nd++)
                p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
                        p == page ? "" : ",", (*nd)->name);
        p += snprintf(p, PAGE_SIZE - (p-page), "\n");
        return p-page;
}
/* firmware version */
static ssize_t aoedisk_show_fwver(struct device *dev,
                                  struct device_attribute *attr, char *page)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct aoedev *d = disk->private_data;

        return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}
static ssize_t aoedisk_show_payload(struct device *dev,
                                    struct device_attribute *attr, char *page)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct aoedev *d = disk->private_data;

        return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
}
static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
{
        struct aoedev *d;
        struct aoetgt **t, **te;
        struct aoeif *ifp, *ife;
        unsigned long flags;
        char c;

        d = s->private;
        seq_printf(s, "rttavg: %d rttdev: %d\n",
                d->rttavg >> RTTSCALE,
                d->rttdev >> RTTDSCALE);
        seq_printf(s, "nskbpool: %d\n", skb_queue_len(&d->skbpool));
        seq_printf(s, "kicked: %ld\n", d->kicked);
        seq_printf(s, "maxbcnt: %ld\n", d->maxbcnt);
        seq_printf(s, "ref: %ld\n", d->ref);

        spin_lock_irqsave(&d->lock, flags);
        t = d->targets;
        te = t + d->ntargets;
        for (; t < te && *t; t++) {
                c = '\t';
                seq_printf(s, "falloc: %ld\n", (*t)->falloc);
                seq_printf(s, "ffree: %p\n",
                        list_empty(&(*t)->ffree) ? NULL : (*t)->ffree.next);
                seq_printf(s, "%pm:%d:%d:%d\n", (*t)->addr, (*t)->nout,
                        (*t)->maxout, (*t)->nframes);
                seq_printf(s, "\tssthresh:%d\n", (*t)->ssthresh);
                seq_printf(s, "\ttaint:%d\n", (*t)->taint);
                seq_printf(s, "\tr:%d\n", (*t)->rpkts);
                seq_printf(s, "\tw:%d\n", (*t)->wpkts);
                ifp = (*t)->ifs;
                ife = ifp + ARRAY_SIZE((*t)->ifs);
                for (; ifp->nd && ifp < ife; ifp++) {
                        seq_printf(s, "%c%s", c, ifp->nd->name);
                        c = ',';
                }
                seq_puts(s, "\n");
        }
        spin_unlock_irqrestore(&d->lock, flags);

        return 0;
}
static int aoe_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, aoedisk_debugfs_show, inode->i_private);
}
static DEVICE_ATTR(state, 0444, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, 0444, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, 0444, aoedisk_show_netif, NULL);
static struct device_attribute dev_attr_firmware_version = {
        .attr = { .name = "firmware-version", .mode = 0444 },
        .show = aoedisk_show_fwver,
};
static DEVICE_ATTR(payload, 0444, aoedisk_show_payload, NULL);
static struct attribute *aoe_attrs[] = {
        &dev_attr_state.attr,
        &dev_attr_mac.attr,
        &dev_attr_netif.attr,
        &dev_attr_firmware_version.attr,
        &dev_attr_payload.attr,
        NULL,
};

static const struct attribute_group aoe_attr_group = {
        .attrs = aoe_attrs,
};

static const struct attribute_group *aoe_attr_groups[] = {
        &aoe_attr_group,
        NULL,
};
static const struct file_operations aoe_debugfs_fops = {
        .open = aoe_debugfs_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
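/* Create the per-device debugfs file, named after the part of the disk name following "etherd/". */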
static void
aoedisk_add_debugfs(struct aoedev *d)
{
        char *p;

        if (aoe_debugfs_dir == NULL)
                return;
        p = strchr(d->gd->disk_name, '/');
        if (p == NULL)
                p = d->gd->disk_name;
        else
                p++;
        d->debugfs = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
                                        &aoe_debugfs_fops);
}
void
aoedisk_rm_debugfs(struct aoedev *d)
{
        debugfs_remove(d->debugfs);
        d->debugfs = NULL;
}
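/* Opens succeed only while the device is up and not flagged for teardown. */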
static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
        struct aoedev *d = bdev->bd_disk->private_data;
        ulong flags;

        if (!virt_addr_valid(d)) {
                pr_crit("aoe: invalid device pointer in %s\n",
                        __func__);
                WARN_ON(1);
                return -ENODEV;
        }

        if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
                return -ENODEV;

        mutex_lock(&aoeblk_mutex);
        spin_lock_irqsave(&d->lock, flags);
        if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
                d->nopen++;
                spin_unlock_irqrestore(&d->lock, flags);
                mutex_unlock(&aoeblk_mutex);
                return 0;
        }
        spin_unlock_irqrestore(&d->lock, flags);
        mutex_unlock(&aoeblk_mutex);
        return -ENODEV;
}
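/* On the final close, poke the AoE layer with a config query for this device. */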
static void
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
        struct aoedev *d = disk->private_data;
        ulong flags;

        spin_lock_irqsave(&d->lock, flags);

        if (--d->nopen == 0) {
                spin_unlock_irqrestore(&d->lock, flags);
                aoecmd_cfg(d->aoemajor, d->aoeminor);
                return;
        }
        spin_unlock_irqrestore(&d->lock, flags);
}
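/* blk-mq ->queue_rq: queue the request on d->rq_list for the AoE command layer, or fail immediately if the device is not up. */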
static blk_status_t aoeblk_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct aoedev *d = hctx->queue->queuedata;

        spin_lock_irq(&d->lock);

        if ((d->flags & DEVFL_UP) == 0) {
                pr_info_ratelimited("aoe: device %ld.%d is not up\n",
                        d->aoemajor, d->aoeminor);
                spin_unlock_irq(&d->lock);
                blk_mq_start_request(bd->rq);
                return BLK_STS_IOERR;
        }

        list_add_tail(&bd->rq->queuelist, &d->rq_list);
        aoecmd_work(d);
        spin_unlock_irq(&d->lock);

        return BLK_STS_OK;
}
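/* Report the CHS geometry cached in d->geo. */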
static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct aoedev *d = bdev->bd_disk->private_data;

        if ((d->flags & DEVFL_UP) == 0) {
                printk(KERN_ERR "aoe: disk not up\n");
                return -ENODEV;
        }

        geo->cylinders = d->geo.cylinders;
        geo->heads = d->geo.heads;
        geo->sectors = d->geo.sectors;
        return 0;
}
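/* Only HDIO_GET_IDENTITY is served directly; anything else is rejected. */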
static int
aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
{
        struct aoedev *d;

        if (!arg)
                return -EINVAL;

        d = bdev->bd_disk->private_data;
        if ((d->flags & DEVFL_UP) == 0) {
                pr_err("aoe: disk not up\n");
                return -ENODEV;
        }

        if (cmd == HDIO_GET_IDENTITY) {
                if (!copy_to_user((void __user *) arg, &d->ident,
                        sizeof(d->ident)))
                        return 0;
                return -EFAULT;
        }

        /* udev calls scsi_id, which uses SG_IO, resulting in noise */
        if (cmd != SG_IO)
                pr_info("aoe: unknown ioctl 0x%x\n", cmd);

        return -ENOTTY;
}
static const struct block_device_operations aoe_bdops = {
        .open = aoeblk_open,
        .release = aoeblk_release,
        .ioctl = aoeblk_ioctl,
        .compat_ioctl = blkdev_compat_ptr_ioctl,
        .getgeo = aoeblk_getgeo,
        .owner = THIS_MODULE,
};

static const struct blk_mq_ops aoeblk_mq_ops = {
        .queue_rq = aoeblk_queue_rq,
};
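/*
 * Late allocation of the gendisk, request queue, tag set, and buf mempool
 * for a newly discovered AoE device.  Assumed to run from process context
 * (it is scheduled via d->work), which is why the sleeping allocations
 * below are acceptable.
 */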
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
        struct aoedev *d = vp;
        struct gendisk *gd;
        mempool_t *mp;
        struct request_queue *q;
        struct blk_mq_tag_set *set;
        enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
        ulong flags;
        int late = 0;
        int err;

        spin_lock_irqsave(&d->lock, flags);
        if (d->flags & DEVFL_GDALLOC
        && !(d->flags & DEVFL_TKILL)
        && !(d->flags & DEVFL_GD_NOW))
                d->flags |= DEVFL_GD_NOW;
        else
                late = 1;
        spin_unlock_irqrestore(&d->lock, flags);
        if (late)
                return;
        gd = alloc_disk(AOE_PARTITIONS);
        if (gd == NULL) {
                pr_err("aoe: cannot allocate disk structure for %ld.%d\n",
                        d->aoemajor, d->aoeminor);
                goto err;
        }

        mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
                buf_pool_cache);
        if (mp == NULL) {
                printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
                        d->aoemajor, d->aoeminor);
                goto err_disk;
        }

        set = &d->tag_set;
        set->ops = &aoeblk_mq_ops;
        set->cmd_size = sizeof(struct aoe_req);
        set->nr_hw_queues = 1;
        set->queue_depth = 128;
        set->numa_node = NUMA_NO_NODE;
        set->flags = BLK_MQ_F_SHOULD_MERGE;
        err = blk_mq_alloc_tag_set(set);
        if (err) {
                pr_err("aoe: cannot allocate tag set for %ld.%d\n",
                        d->aoemajor, d->aoeminor);
                goto err_mempool;
        }

        q = blk_mq_init_queue(set);
        if (IS_ERR(q)) {
                pr_err("aoe: cannot allocate block queue for %ld.%d\n",
                        d->aoemajor, d->aoeminor);
                blk_mq_free_tag_set(set);
                goto err_mempool;
        }
        spin_lock_irqsave(&d->lock, flags);
        WARN_ON(!(d->flags & DEVFL_GD_NOW));
        WARN_ON(!(d->flags & DEVFL_GDALLOC));
        WARN_ON(d->flags & DEVFL_TKILL);
        WARN_ON(d->gd);
        WARN_ON(d->flags & DEVFL_UP);
        blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
        q->backing_dev_info->name = "aoe";
        q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
        d->bufpool = mp;
        d->blkq = gd->queue = q;
        q->queuedata = d;
        d->gd = gd;
        if (aoe_maxsectors)
                blk_queue_max_hw_sectors(q, aoe_maxsectors);
        gd->major = AOE_MAJOR;
        gd->first_minor = d->sysminor;
        gd->fops = &aoe_bdops;
        gd->private_data = d;
        set_capacity(gd, d->ssize);
        snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
                d->aoemajor, d->aoeminor);

        d->flags &= ~DEVFL_GDALLOC;
        d->flags |= DEVFL_UP;

        spin_unlock_irqrestore(&d->lock, flags);

        device_add_disk(NULL, gd, aoe_attr_groups);
        aoedisk_add_debugfs(d);

        spin_lock_irqsave(&d->lock, flags);
        WARN_ON(!(d->flags & DEVFL_GD_NOW));
        d->flags &= ~DEVFL_GD_NOW;
        spin_unlock_irqrestore(&d->lock, flags);
        return;
err_mempool:
        mempool_destroy(mp);
err_disk:
        put_disk(gd);
err:
        spin_lock_irqsave(&d->lock, flags);
        d->flags &= ~DEVFL_GD_NOW;
        schedule_work(&d->work);
        spin_unlock_irqrestore(&d->lock, flags);
}
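/* Module teardown: remove the debugfs tree and destroy the buf slab cache. */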
void
aoeblk_exit(void)
{
        debugfs_remove_recursive(aoe_debugfs_dir);
        aoe_debugfs_dir = NULL;
        kmem_cache_destroy(buf_pool_cache);
}
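/* Module setup: create the slab cache backing per-device buf mempools and the "aoe" debugfs directory. */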
int __init
aoeblk_init(void)
{
        buf_pool_cache = kmem_cache_create("aoe_bufs",
                                           sizeof(struct buf),
                                           0, 0, NULL);
        if (buf_pool_cache == NULL)
                return -ENOMEM;

        aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
        return 0;
}