/* Copyright (c) 2007 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoedev.c
 * AoE device utility functions; maintains device list.
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "aoe.h"

static void dummy_timer(ulong);
static void aoedev_freedev(struct aoedev *);
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);

static struct aoedev *devlist;
static DEFINE_SPINLOCK(devlist_lock);
/* look up a device by its AoE major (shelf) and minor (slot) address */
struct aoedev *
aoedev_by_aoeaddr(int maj, int min)
{
        struct aoedev *d;
        ulong flags;

        spin_lock_irqsave(&devlist_lock, flags);

        for (d=devlist; d; d=d->next)
                if (d->aoemajor == maj && d->aoeminor == min)
                        break;

        spin_unlock_irqrestore(&devlist_lock, flags);
        return d;
}
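
/*
 * Illustrative sketch, not part of the original file: lookups on the device
 * list in this file follow the pattern of aoedev_by_aoeaddr() above -- take
 * devlist_lock with interrupts disabled, walk the singly linked list via
 * d->next, then drop the lock.  The helper name below is hypothetical and
 * exists only to show that pattern in isolation.
 */
static int __maybe_unused
example_count_devices(void)
{
        struct aoedev *d;
        ulong flags;
        int n = 0;

        spin_lock_irqsave(&devlist_lock, flags);
        for (d = devlist; d; d = d->next)
                n++;
        spin_unlock_irqrestore(&devlist_lock, flags);
        return n;
}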
/* placeholder timer handler: re-arm once per second until the device is killed */
static void
dummy_timer(ulong vp)
{
        struct aoedev *d;

        d = (struct aoedev *)vp;
        if (d->flags & DEVFL_TKILL)
                return;
        d->timer.expires = jiffies + HZ;
        add_timer(&d->timer);
}
void
aoedev_downdev(struct aoedev *d)
{
        struct aoetgt **t, **te;
        struct frame *f, *e;
        struct buf *buf;
        struct bio *bio;

        /* fail every frame still outstanding on any target */
        t = d->targets;
        te = t + NTARGETS;
        for (; t < te && *t; t++) {
                f = (*t)->frames;
                e = f + (*t)->nframes;
                for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) {
                        if (f->tag == FREETAG || f->buf == NULL)
                                continue;
                        buf = f->buf;
                        bio = buf->bio;
                        if (--buf->nframesout == 0
                        && buf != d->inprocess) {
                                mempool_free(buf, d->bufpool);
                                bio_endio(bio, -EIO);
                        }
                }
                (*t)->maxout = (*t)->nframes;
                (*t)->nout = 0;
        }
        buf = d->inprocess;
        if (buf) {
                bio = buf->bio;
                mempool_free(buf, d->bufpool);
                bio_endio(bio, -EIO);
        }
        d->inprocess = NULL;
        d->htgt = NULL;

        /* fail all I/O that was queued but never sent */
        while (!list_empty(&d->bufq)) {
                buf = container_of(d->bufq.next, struct buf, bufs);
                list_del(d->bufq.next);
                bio = buf->bio;
                mempool_free(buf, d->bufpool);
                bio_endio(bio, -EIO);
        }

        if (d->gd)
                set_capacity(d->gd, 0);

        d->flags &= ~DEVFL_UP;
}
/* free everything the device owns; may sleep, so call only from process context */
static void
aoedev_freedev(struct aoedev *d)
{
        struct aoetgt **t, **e;

        cancel_work_sync(&d->work);
        if (d->gd) {
                aoedisk_rm_sysfs(d);
                del_gendisk(d->gd);
                put_disk(d->gd);
        }
        t = d->targets;
        e = t + NTARGETS;
        for (; t < e && *t; t++)
                freetgt(d, *t);
        if (d->bufpool)
                mempool_destroy(d->bufpool);
        skbpoolfree(d);
        blk_cleanup_queue(d->blkq);
        kfree(d);
}
/*
 * Remove idle devices from the device list.  Normally only devices that are
 * down are flushed; writing "all" also removes devices that are up, as long
 * as they are not open and not busy allocating or resizing their gendisk.
 */
int
aoedev_flush(const char __user *str, size_t cnt)
{
        ulong flags;
        struct aoedev *d, **dd;
        struct aoedev *rmd = NULL;
        char buf[16];
        int all = 0;

        if (cnt >= 3) {
                if (cnt > sizeof buf)
                        cnt = sizeof buf;
                if (copy_from_user(buf, str, cnt))
                        return -EFAULT;
                all = !strncmp(buf, "all", 3);
        }

        spin_lock_irqsave(&devlist_lock, flags);
        dd = &devlist;
        while ((d = *dd)) {
                spin_lock(&d->lock);
                if ((!all && (d->flags & DEVFL_UP))
                || (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
                || d->nopen) {
                        spin_unlock(&d->lock);
                        dd = &d->next;
                        continue;
                }
                /* unlink from devlist and collect on a private removal list */
                *dd = d->next;
                aoedev_downdev(d);
                d->flags |= DEVFL_TKILL;
                spin_unlock(&d->lock);
                d->next = rmd;
                rmd = d;
        }
        spin_unlock_irqrestore(&devlist_lock, flags);
        while ((d = rmd)) {
                rmd = d->next;
                del_timer_sync(&d->timer);
                aoedev_freedev(d);      /* must be able to sleep */
        }
        return 0;
}
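
/*
 * Illustrative sketch, not part of the original file: aoedev_flush() takes a
 * user-space buffer, so it is meant to be driven from a write handler (in
 * this driver, typically the character-device code in aoechr.c).  The handler
 * below is hypothetical and only shows the calling convention: propagate a
 * negative error, otherwise report the whole write as consumed.
 */
static ssize_t __maybe_unused
example_flush_write(const char __user *buf, size_t len)
{
        int err;

        err = aoedev_flush(buf, len);   /* "all" flushes even devices that are up */
        return err ? err : len;
}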
/* I'm not really sure that this is a realistic problem, but if the
 * network driver goes gonzo, let's just leak memory after complaining.
 */
static void
skbfree(struct sk_buff *skb)
{
        enum { Sms = 100, Tms = 3*1000};
        int i = Tms / Sms;

        if (skb == NULL)
                return;
        /* poll up to Tms (3 s), Sms (100 ms) at a time, for other holders to drop their reference */
        while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
                msleep(Sms);
        if (i < 0) {
                printk(KERN_ERR
                        "aoe: %s holds ref: %s\n",
                        skb->dev ? skb->dev->name : "netif",
                        "cannot free skb -- memory leaked.");
                return;
        }
        skb_shinfo(skb)->nr_frags = skb->data_len = 0;
        skb_trim(skb, 0);
        dev_kfree_skb(skb);
}
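
/*
 * Illustrative sketch, not part of the original file: skbfree() can sleep in
 * msleep() above, so it must only be called from process context, as the
 * teardown helpers freetgt() and skbpoolfree() below do.  The drain loop
 * below is hypothetical and only shows that constraint.
 */
static void __maybe_unused
example_drain_queue(struct sk_buff_head *q)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(q)) != NULL)
                skbfree(skb);   /* waits for other holders, then frees or leaks */
}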
static void
skbpoolfree(struct aoedev *d)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&d->skbpool, skb, tmp)
                skbfree(skb);

        __skb_queue_head_init(&d->skbpool);
}
/* find it or malloc it */
struct aoedev *
aoedev_by_sysminor_m(ulong sysminor)
{
        struct aoedev *d;
        ulong flags;

        spin_lock_irqsave(&devlist_lock, flags);

        for (d=devlist; d; d=d->next)
                if (d->sysminor == sysminor)
                        break;
        if (d)
                goto out;
        /* GFP_ATOMIC: we are allocating under devlist_lock with interrupts off */
        d = kcalloc(1, sizeof *d, GFP_ATOMIC);
        if (!d)
                goto out;
        INIT_WORK(&d->work, aoecmd_sleepwork);
        spin_lock_init(&d->lock);
        skb_queue_head_init(&d->sendq);
        skb_queue_head_init(&d->skbpool);
        init_timer(&d->timer);
        d->timer.data = (ulong) d;
        d->timer.function = dummy_timer;
        d->timer.expires = jiffies + HZ;
        add_timer(&d->timer);
        d->bufpool = NULL;      /* defer to aoeblk_gdalloc */
        d->tgt = d->targets;
        INIT_LIST_HEAD(&d->bufq);
        d->sysminor = sysminor;
        d->aoemajor = AOEMAJOR(sysminor);
        d->aoeminor = AOEMINOR(sysminor);
        d->mintimer = MINTIMER;
        d->next = devlist;
        devlist = d;
 out:
        spin_unlock_irqrestore(&devlist_lock, flags);
        return d;
}
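
/*
 * Illustrative sketch, not part of the original file: a discovery path would
 * call aoedev_by_sysminor_m() with the system minor derived from an AoE
 * config response (in this driver that handling lives in aoecmd.c).  The
 * helper name below is hypothetical; it only shows that a NULL return means
 * the GFP_ATOMIC allocation failed.
 */
static __maybe_unused struct aoedev *
example_find_or_create(ulong sysminor)
{
        struct aoedev *d;

        d = aoedev_by_sysminor_m(sysminor);     /* existing device or a fresh one */
        if (d == NULL)
                printk(KERN_INFO "aoe: cannot allocate aoedev\n");
        return d;
}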
static void
freetgt(struct aoedev *d, struct aoetgt *t)
{
        struct frame *f, *e;

        f = t->frames;
        e = f + t->nframes;
        for (; f < e; f++)
                skbfree(f->skb);
        kfree(t->frames);
        kfree(t);
}
void
aoedev_exit(void)
{
        struct aoedev *d;
        ulong flags;

        while ((d = devlist)) {
                devlist = d->next;

                spin_lock_irqsave(&d->lock, flags);
                aoedev_downdev(d);
                d->flags |= DEVFL_TKILL;
                spin_unlock_irqrestore(&d->lock, flags);

                del_timer_sync(&d->timer);
                aoedev_freedev(d);
        }
}
int __init
aoedev_init(void)
{
        return 0;
}