drivers/md/dm-uevent.c

/*
 * Device Mapper Uevent Support (dm-uevent)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2007
 *	Author: Mike Anderson <andmike@linux.vnet.ibm.com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>

#include "dm.h"
#include "dm-uevent.h"

#define DM_MSG_PREFIX "uevent"

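/*
 * Maps each dm_uevent_type to the kobject action that is emitted and to
 * the string reported in the DM_ACTION uevent variable.  The enum value
 * doubles as the index into this table (see dm_path_uevent() below).
 */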
static const struct {
	enum dm_uevent_type type;
	enum kobject_action action;
	char *name;
} _dm_uevent_type_names[] = {
	{DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
	{DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
};

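/* Slab cache from which struct dm_uevent instances are allocated. */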
static struct kmem_cache *_dm_event_cache;

struct dm_uevent {
	struct mapped_device *md;	/* device this event describes */
	enum kobject_action action;	/* kobject action to emit */
	struct kobj_uevent_env ku_env;	/* accumulated environment variables */
	struct list_head elist;		/* entry in the device's event list */
	char name[DM_NAME_LEN];		/* filled in by dm_send_uevents() */
	char uuid[DM_UUID_LEN];		/* filled in by dm_send_uevents() */
};

static void dm_uevent_free(struct dm_uevent *event)
{
	kmem_cache_free(_dm_event_cache, event);
}

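/*
 * Allocate a zeroed event from the slab cache.  GFP_ATOMIC is used
 * because path events may be raised from contexts that cannot sleep,
 * e.g. under a target's spinlock in the I/O error path.
 */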
static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
{
	struct dm_uevent *event;

	event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
	if (!event)
		return NULL;

	INIT_LIST_HEAD(&event->elist);
	event->md = md;

	return event;
}

static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
					      struct dm_target *ti,
					      enum kobject_action action,
					      const char *dm_action,
					      const char *path,
					      unsigned nr_valid_paths)
{
	struct dm_uevent *event;

	event = dm_uevent_alloc(md);
	if (!event) {
		DMERR("%s: dm_uevent_alloc() failed", __FUNCTION__);
		goto err_nomem;
	}

	event->action = action;

	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
		DMERR("%s: add_uevent_var() for DM_TARGET failed",
		      __FUNCTION__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
		DMERR("%s: add_uevent_var() for DM_ACTION failed",
		      __FUNCTION__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
			   dm_next_uevent_seq(md))) {
		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
		      __FUNCTION__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
		DMERR("%s: add_uevent_var() for DM_PATH failed", __FUNCTION__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
			   nr_valid_paths)) {
		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
		      __FUNCTION__);
		goto err_add;
	}

	return event;

err_add:
	dm_uevent_free(event);
err_nomem:
	/* Both failure paths are reported to the caller as -ENOMEM. */
	return ERR_PTR(-ENOMEM);
}

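/*
 * Note the two-phase design: dm_build_path_uevent() only builds and
 * queues event data; nothing is emitted until the dm core calls
 * dm_send_uevents() below.  The name/uuid lookup is deferred to send
 * time because, per the comment in dm_send_uevents(), the locking
 * around the hash_cell is not compatible with the context in which
 * the variables above are added.
 */
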
/**
 * dm_send_uevents - send uevents for given list
 *
 * @events:	list of events to send
 * @kobj:	kobject generating event
 *
 */
void dm_send_uevents(struct list_head *events, struct kobject *kobj)
{
	int r;
	struct dm_uevent *event, *next;

	list_for_each_entry_safe(event, next, events, elist) {
		list_del_init(&event->elist);

		/*
		 * Need to call dm_copy_name_and_uuid from here for now.
		 * Context of previous var adds and locking used for
		 * hash_cell not compatible.
		 */
		if (dm_copy_name_and_uuid(event->md, event->name,
					  event->uuid)) {
			DMERR("%s: dm_copy_name_and_uuid() failed",
			      __FUNCTION__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
			DMERR("%s: add_uevent_var() for DM_NAME failed",
			      __FUNCTION__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
			DMERR("%s: add_uevent_var() for DM_UUID failed",
			      __FUNCTION__);
			goto uevent_free;
		}

		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
		if (r)
			DMERR("%s: kobject_uevent_env failed", __FUNCTION__);
uevent_free:
		dm_uevent_free(event);
	}
}
EXPORT_SYMBOL_GPL(dm_send_uevents);

/**
 * dm_path_uevent - called to create a new path event and queue it
 *
 * @event_type:	path event type enum
 * @ti:		pointer to a dm_target
 * @path:	string containing pathname
 * @nr_valid_paths:	number of valid paths remaining
 *
 */
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
		    const char *path, unsigned nr_valid_paths)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct dm_uevent *event;

	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
		DMERR("%s: Invalid event_type %d", __FUNCTION__, event_type);
		goto out;
	}

	event = dm_build_path_uevent(md, ti,
				     _dm_uevent_type_names[event_type].action,
				     _dm_uevent_type_names[event_type].name,
				     path, nr_valid_paths);
	if (IS_ERR(event))
		goto out;

	dm_uevent_add(md, &event->elist);

out:
	dm_put(md);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);

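/*
 * Usage sketch (illustrative, not taken from this file): a multipath-
 * style target raises a path event from its error handling, and the dm
 * core later flushes the device's queued events from process context.
 * The identifiers m, pgpath and kobj below are assumptions following
 * dm-mpath/dm core conventions of this era:
 *
 *	// target side, e.g. on path failure (atomic context is fine):
 *	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 *		       pgpath->path.dev->name, m->nr_valid_paths);
 *
 *	// dm core side, from its event callback:
 *	LIST_HEAD(uevents);
 *	// ...splice the device's queued events onto uevents...
 *	dm_send_uevents(&uevents, kobj);	// kobj: the gendisk's kobject
 */
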
int dm_uevent_init(void)
{
	_dm_event_cache = KMEM_CACHE(dm_uevent, 0);
	if (!_dm_event_cache)
		return -ENOMEM;

	DMINFO("version 1.0.3");

	return 0;
}

void dm_uevent_exit(void)
{
	kmem_cache_destroy(_dm_event_cache);
}