1 /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 */
#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>

#include "aoe.h"
/* serializes open against device state transitions */
static DEFINE_MUTEX(aoeblk_mutex);
/* slab cache backing each device's struct buf mempool */
static struct kmem_cache *buf_pool_cache;
23 static ssize_t
aoedisk_show_state(struct device
*dev
,
24 struct device_attribute
*attr
, char *page
)
26 struct gendisk
*disk
= dev_to_disk(dev
);
27 struct aoedev
*d
= disk
->private_data
;
29 return snprintf(page
, PAGE_SIZE
,
31 (d
->flags
& DEVFL_UP
) ? "up" : "down",
32 (d
->flags
& DEVFL_KICKME
) ? ",kickme" :
33 (d
->nopen
&& !(d
->flags
& DEVFL_UP
)) ? ",closewait" : "");
34 /* I'd rather see nopen exported so we can ditch closewait */
36 static ssize_t
aoedisk_show_mac(struct device
*dev
,
37 struct device_attribute
*attr
, char *page
)
39 struct gendisk
*disk
= dev_to_disk(dev
);
40 struct aoedev
*d
= disk
->private_data
;
41 struct aoetgt
*t
= d
->targets
[0];
44 return snprintf(page
, PAGE_SIZE
, "none\n");
45 return snprintf(page
, PAGE_SIZE
, "%pm\n", t
->addr
);
47 static ssize_t
aoedisk_show_netif(struct device
*dev
,
48 struct device_attribute
*attr
, char *page
)
50 struct gendisk
*disk
= dev_to_disk(dev
);
51 struct aoedev
*d
= disk
->private_data
;
52 struct net_device
*nds
[8], **nd
, **nnd
, **ne
;
53 struct aoetgt
**t
, **te
;
54 struct aoeif
*ifp
, *e
;
57 memset(nds
, 0, sizeof nds
);
59 ne
= nd
+ ARRAY_SIZE(nds
);
62 for (; t
< te
&& *t
; t
++) {
65 for (; ifp
< e
&& ifp
->nd
; ifp
++) {
66 for (nnd
= nds
; nnd
< nd
; nnd
++)
69 if (nnd
== nd
&& nd
!= ne
)
77 return snprintf(page
, PAGE_SIZE
, "none\n");
78 for (p
= page
; nd
< ne
; nd
++)
79 p
+= snprintf(p
, PAGE_SIZE
- (p
-page
), "%s%s",
80 p
== page
? "" : ",", (*nd
)->name
);
81 p
+= snprintf(p
, PAGE_SIZE
- (p
-page
), "\n");
84 /* firmware version */
85 static ssize_t
aoedisk_show_fwver(struct device
*dev
,
86 struct device_attribute
*attr
, char *page
)
88 struct gendisk
*disk
= dev_to_disk(dev
);
89 struct aoedev
*d
= disk
->private_data
;
91 return snprintf(page
, PAGE_SIZE
, "0x%04x\n", (unsigned int) d
->fw_ver
);
94 static DEVICE_ATTR(state
, S_IRUGO
, aoedisk_show_state
, NULL
);
95 static DEVICE_ATTR(mac
, S_IRUGO
, aoedisk_show_mac
, NULL
);
96 static DEVICE_ATTR(netif
, S_IRUGO
, aoedisk_show_netif
, NULL
);
97 static struct device_attribute dev_attr_firmware_version
= {
98 .attr
= { .name
= "firmware-version", .mode
= S_IRUGO
},
99 .show
= aoedisk_show_fwver
,
102 static struct attribute
*aoe_attrs
[] = {
103 &dev_attr_state
.attr
,
105 &dev_attr_netif
.attr
,
106 &dev_attr_firmware_version
.attr
,
110 static const struct attribute_group attr_group
= {
115 aoedisk_add_sysfs(struct aoedev
*d
)
117 return sysfs_create_group(&disk_to_dev(d
->gd
)->kobj
, &attr_group
);
120 aoedisk_rm_sysfs(struct aoedev
*d
)
122 sysfs_remove_group(&disk_to_dev(d
->gd
)->kobj
, &attr_group
);
126 aoeblk_open(struct block_device
*bdev
, fmode_t mode
)
128 struct aoedev
*d
= bdev
->bd_disk
->private_data
;
131 mutex_lock(&aoeblk_mutex
);
132 spin_lock_irqsave(&d
->lock
, flags
);
133 if (d
->flags
& DEVFL_UP
) {
135 spin_unlock_irqrestore(&d
->lock
, flags
);
136 mutex_unlock(&aoeblk_mutex
);
139 spin_unlock_irqrestore(&d
->lock
, flags
);
140 mutex_unlock(&aoeblk_mutex
);
145 aoeblk_release(struct gendisk
*disk
, fmode_t mode
)
147 struct aoedev
*d
= disk
->private_data
;
150 spin_lock_irqsave(&d
->lock
, flags
);
152 if (--d
->nopen
== 0) {
153 spin_unlock_irqrestore(&d
->lock
, flags
);
154 aoecmd_cfg(d
->aoemajor
, d
->aoeminor
);
157 spin_unlock_irqrestore(&d
->lock
, flags
);
163 aoeblk_make_request(struct request_queue
*q
, struct bio
*bio
)
165 struct sk_buff_head queue
;
170 blk_queue_bounce(q
, &bio
);
173 printk(KERN_ERR
"aoe: bio is NULL\n");
177 d
= bio
->bi_bdev
->bd_disk
->private_data
;
179 printk(KERN_ERR
"aoe: bd_disk->private_data is NULL\n");
181 bio_endio(bio
, -ENXIO
);
183 } else if (bio
->bi_rw
& REQ_HARDBARRIER
) {
184 bio_endio(bio
, -EOPNOTSUPP
);
186 } else if (bio
->bi_io_vec
== NULL
) {
187 printk(KERN_ERR
"aoe: bi_io_vec is NULL\n");
189 bio_endio(bio
, -ENXIO
);
192 buf
= mempool_alloc(d
->bufpool
, GFP_NOIO
);
194 printk(KERN_INFO
"aoe: buf allocation failure\n");
195 bio_endio(bio
, -ENOMEM
);
198 memset(buf
, 0, sizeof(*buf
));
199 INIT_LIST_HEAD(&buf
->bufs
);
200 buf
->stime
= jiffies
;
202 buf
->resid
= bio
->bi_size
;
203 buf
->sector
= bio
->bi_sector
;
204 buf
->bv
= &bio
->bi_io_vec
[bio
->bi_idx
];
205 buf
->bv_resid
= buf
->bv
->bv_len
;
206 WARN_ON(buf
->bv_resid
== 0);
207 buf
->bv_off
= buf
->bv
->bv_offset
;
209 spin_lock_irqsave(&d
->lock
, flags
);
211 if ((d
->flags
& DEVFL_UP
) == 0) {
212 pr_info_ratelimited("aoe: device %ld.%d is not up\n",
213 d
->aoemajor
, d
->aoeminor
);
214 spin_unlock_irqrestore(&d
->lock
, flags
);
215 mempool_free(buf
, d
->bufpool
);
216 bio_endio(bio
, -ENXIO
);
220 list_add_tail(&buf
->bufs
, &d
->bufq
);
223 __skb_queue_head_init(&queue
);
224 skb_queue_splice_init(&d
->sendq
, &queue
);
226 spin_unlock_irqrestore(&d
->lock
, flags
);
233 aoeblk_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
235 struct aoedev
*d
= bdev
->bd_disk
->private_data
;
237 if ((d
->flags
& DEVFL_UP
) == 0) {
238 printk(KERN_ERR
"aoe: disk not up\n");
242 geo
->cylinders
= d
->geo
.cylinders
;
243 geo
->heads
= d
->geo
.heads
;
244 geo
->sectors
= d
->geo
.sectors
;
248 static const struct block_device_operations aoe_bdops
= {
250 .release
= aoeblk_release
,
251 .getgeo
= aoeblk_getgeo
,
252 .owner
= THIS_MODULE
,
255 /* alloc_disk and add_disk can sleep */
257 aoeblk_gdalloc(void *vp
)
259 struct aoedev
*d
= vp
;
263 gd
= alloc_disk(AOE_PARTITIONS
);
266 "aoe: cannot allocate disk structure for %ld.%d\n",
267 d
->aoemajor
, d
->aoeminor
);
271 d
->bufpool
= mempool_create_slab_pool(MIN_BUFS
, buf_pool_cache
);
272 if (d
->bufpool
== NULL
) {
273 printk(KERN_ERR
"aoe: cannot allocate bufpool for %ld.%d\n",
274 d
->aoemajor
, d
->aoeminor
);
278 d
->blkq
= blk_alloc_queue(GFP_KERNEL
);
281 blk_queue_make_request(d
->blkq
, aoeblk_make_request
);
282 d
->blkq
->backing_dev_info
.name
= "aoe";
283 if (bdi_init(&d
->blkq
->backing_dev_info
))
285 spin_lock_irqsave(&d
->lock
, flags
);
286 gd
->major
= AOE_MAJOR
;
287 gd
->first_minor
= d
->sysminor
* AOE_PARTITIONS
;
288 gd
->fops
= &aoe_bdops
;
289 gd
->private_data
= d
;
290 set_capacity(gd
, d
->ssize
);
291 snprintf(gd
->disk_name
, sizeof gd
->disk_name
, "etherd/e%ld.%d",
292 d
->aoemajor
, d
->aoeminor
);
296 d
->flags
&= ~DEVFL_GDALLOC
;
297 d
->flags
|= DEVFL_UP
;
299 spin_unlock_irqrestore(&d
->lock
, flags
);
302 aoedisk_add_sysfs(d
);
306 blk_cleanup_queue(d
->blkq
);
309 mempool_destroy(d
->bufpool
);
313 spin_lock_irqsave(&d
->lock
, flags
);
314 d
->flags
&= ~DEVFL_GDALLOC
;
315 spin_unlock_irqrestore(&d
->lock
, flags
);
321 kmem_cache_destroy(buf_pool_cache
);
327 buf_pool_cache
= kmem_cache_create("aoe_bufs",
330 if (buf_pool_cache
== NULL
)