/*
 * zpool memory storage api
 *
 * Copyright (C) 2014 Dan Streetman
 *
 * This is a common frontend for memory storage pool implementations.
 * Typically, this is used to store compressed memory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/zpool.h>

struct zpool {
	char *type;

	struct zpool_driver *driver;
	void *pool;
	struct zpool_ops *ops;

	struct list_head list;
};

static LIST_HEAD(drivers_head);
static DEFINE_SPINLOCK(drivers_lock);

static LIST_HEAD(pools_head);
static DEFINE_SPINLOCK(pools_lock);

/**
 * zpool_register_driver() - register a zpool implementation.
 * @driver:	driver to register
 */
void zpool_register_driver(struct zpool_driver *driver)
{
	spin_lock(&drivers_lock);
	atomic_set(&driver->refcount, 0);
	list_add(&driver->list, &drivers_head);
	spin_unlock(&drivers_lock);
}
EXPORT_SYMBOL(zpool_register_driver);

/**
 * zpool_unregister_driver() - unregister a zpool implementation.
 * @driver:	driver to unregister.
 *
 * Module usage counting is used to prevent using a driver
 * while/after unloading, so if this is called from module
 * exit function, this should never fail; if called from
 * other than the module exit function, and this returns
 * failure, the driver is in use and must remain available.
 */
int zpool_unregister_driver(struct zpool_driver *driver)
{
	int ret = 0, refcount;

	spin_lock(&drivers_lock);
	refcount = atomic_read(&driver->refcount);
	WARN_ON(refcount < 0);
	if (refcount > 0)
		ret = -EBUSY;
	else
		list_del(&driver->list);
	spin_unlock(&drivers_lock);

	return ret;
}
EXPORT_SYMBOL(zpool_unregister_driver);

/**
 * zpool_evict() - evict callback from a zpool implementation.
 * @pool:	pool to evict from.
 * @handle:	handle to evict.
 *
 * This can be used by zpool implementations to call the
 * user's evict zpool_ops struct evict callback.
 */
int zpool_evict(void *pool, unsigned long handle)
{
	struct zpool *zpool;

	spin_lock(&pools_lock);
	list_for_each_entry(zpool, &pools_head, list) {
		if (zpool->pool == pool) {
			spin_unlock(&pools_lock);
			if (!zpool->ops || !zpool->ops->evict)
				return -EINVAL;
			return zpool->ops->evict(zpool, handle);
		}
	}
	spin_unlock(&pools_lock);

	return -ENOENT;
}
EXPORT_SYMBOL(zpool_evict);
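
/*
 * Example: how an implementation's internal reclaim hands an evicted
 * handle back to the pool's user.  Illustrative sketch only; the
 * backend-side name is hypothetical.  The pool pointer passed here must
 * be the same pointer the implementation returned from its create()
 * callback, since that is what identifies the owning zpool above.
 *
 *	static int mybackend_evict_handle(void *pool, unsigned long handle)
 *	{
 *		return zpool_evict(pool, handle);
 *	}
 */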

static struct zpool_driver *zpool_get_driver(char *type)
{
	struct zpool_driver *driver;

	spin_lock(&drivers_lock);
	list_for_each_entry(driver, &drivers_head, list) {
		if (!strcmp(driver->type, type)) {
			bool got = try_module_get(driver->owner);

			if (got)
				atomic_inc(&driver->refcount);
			spin_unlock(&drivers_lock);
			return got ? driver : NULL;
		}
	}

	spin_unlock(&drivers_lock);
	return NULL;
}

static void zpool_put_driver(struct zpool_driver *driver)
{
	atomic_dec(&driver->refcount);
	module_put(driver->owner);
}

/**
 * zpool_create_pool() - Create a new zpool
 * @type	The type of the zpool to create (e.g. zbud, zsmalloc)
 * @gfp		The GFP flags to use when allocating the pool.
 * @ops		The optional ops callback.
 *
 * This creates a new zpool of the specified type.  The gfp flags will be
 * used when allocating memory, if the implementation supports it.  If the
 * ops param is NULL, then the created zpool will not be shrinkable.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: New zpool on success, NULL on failure.
 */
struct zpool *zpool_create_pool(char *type, gfp_t gfp, struct zpool_ops *ops)
{
	struct zpool_driver *driver;
	struct zpool *zpool;

	pr_info("creating pool type %s\n", type);

	driver = zpool_get_driver(type);

	if (!driver) {
		request_module(type);
		driver = zpool_get_driver(type);
	}

	if (!driver) {
		pr_err("no driver for type %s\n", type);
		return NULL;
	}

	zpool = kmalloc(sizeof(*zpool), gfp);
	if (!zpool) {
		pr_err("couldn't create zpool - out of memory\n");
		zpool_put_driver(driver);
		return NULL;
	}

	zpool->type = driver->type;
	zpool->driver = driver;
	zpool->pool = driver->create(gfp, ops);
	zpool->ops = ops;

	if (!zpool->pool) {
		pr_err("couldn't create %s pool\n", type);
		zpool_put_driver(driver);
		kfree(zpool);
		return NULL;
	}

	pr_info("created %s pool\n", type);

	spin_lock(&pools_lock);
	list_add(&zpool->list, &pools_head);
	spin_unlock(&pools_lock);

	return zpool;
}

/**
 * zpool_destroy_pool() - Destroy a zpool
 * @pool	The zpool to destroy.
 *
 * Implementations must guarantee this to be thread-safe,
 * however only when destroying different pools.  The same
 * pool should only be destroyed once, and should not be used
 * after it is destroyed.
 *
 * This destroys an existing zpool.  The zpool should not be in use.
 */
void zpool_destroy_pool(struct zpool *zpool)
{
	pr_info("destroying pool type %s\n", zpool->type);

	spin_lock(&pools_lock);
	list_del(&zpool->list);
	spin_unlock(&pools_lock);
	zpool->driver->destroy(zpool->pool);
	zpool_put_driver(zpool->driver);
	kfree(zpool);
}
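
/*
 * Example: typical pool lifecycle for a zpool user.  Illustrative sketch
 * only; "zbud" is just one possible type, and passing a NULL ops pointer
 * simply makes the resulting pool non-shrinkable.
 *
 *	struct zpool *pool;
 *
 *	pool = zpool_create_pool("zbud", GFP_KERNEL, NULL);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	// ... zpool_malloc()/zpool_free() objects in the pool ...
 *
 *	zpool_destroy_pool(pool);
 */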

/**
 * zpool_get_type() - Get the type of the zpool
 * @pool	The zpool to check
 *
 * This returns the type of the pool.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: The type of zpool.
 */
char *zpool_get_type(struct zpool *zpool)
{
	return zpool->type;
}

/**
 * zpool_malloc() - Allocate memory
 * @pool	The zpool to allocate from.
 * @size	The amount of memory to allocate.
 * @gfp		The GFP flags to use when allocating memory.
 * @handle	Pointer to the handle to set
 *
 * This allocates the requested amount of memory from the pool.
 * The gfp flags will be used when allocating memory, if the
 * implementation supports it.  The provided @handle will be
 * set to the allocated object handle.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: 0 on success, negative value on error.
 */
int zpool_malloc(struct zpool *zpool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return zpool->driver->malloc(zpool->pool, size, gfp, handle);
}

/**
 * zpool_free() - Free previously allocated memory
 * @pool	The zpool that allocated the memory.
 * @handle	The handle to the memory to free.
 *
 * This frees previously allocated memory.  This does not guarantee
 * that the pool will actually free memory, only that the memory
 * in the pool will become available for use by the pool.
 *
 * Implementations must guarantee this to be thread-safe,
 * however only when freeing different handles.  The same
 * handle should only be freed once, and should not be used
 * after it is freed.
 */
void zpool_free(struct zpool *zpool, unsigned long handle)
{
	zpool->driver->free(zpool->pool, handle);
}

/**
 * zpool_shrink() - Shrink the pool size
 * @pool	The zpool to shrink.
 * @pages	The number of pages to shrink the pool.
 * @reclaimed	The number of pages successfully evicted.
 *
 * This attempts to shrink the actual memory size of the pool
 * by evicting currently used handle(s).  If the pool was
 * created with no zpool_ops, or the evict call fails for any
 * of the handles, this will fail.  If non-NULL, the @reclaimed
 * parameter will be set to the number of pages reclaimed,
 * which may be more than the number of pages requested.
 *
 * Implementations must guarantee this to be thread-safe.
 *
 * Returns: 0 on success, negative value on error/failure.
 */
int zpool_shrink(struct zpool *zpool, unsigned int pages,
			unsigned int *reclaimed)
{
	return zpool->driver->shrink(zpool->pool, pages, reclaimed);
}
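
/*
 * Example: making a pool shrinkable.  Illustrative sketch only; my_evict
 * and my_zpool_ops are hypothetical caller-provided names, and exactly
 * what the callback must do with the handle is defined by the caller and
 * the implementation in use.  A pool created with these ops can then be
 * shrunk with, e.g., zpool_shrink(pool, 1, &reclaimed).
 *
 *	static int my_evict(struct zpool *zpool, unsigned long handle)
 *	{
 *		// write the stored object back to its origin, then
 *		// release it so the underlying page can be reclaimed
 *		return 0;
 *	}
 *
 *	static struct zpool_ops my_zpool_ops = {
 *		.evict = my_evict,
 *	};
 *
 *	pool = zpool_create_pool("zbud", GFP_KERNEL, &my_zpool_ops);
 */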

/**
 * zpool_map_handle() - Map a previously allocated handle into memory
 * @pool	The zpool that the handle was allocated from
 * @handle	The handle to map
 * @mm		How the memory should be mapped
 *
 * This maps a previously allocated handle into memory.  The @mm
 * param indicates to the implementation how the memory will be
 * used, i.e. read-only, write-only, read-write.  If the
 * implementation does not support it, the memory will be treated
 * as read-write.
 *
 * This may hold locks, disable interrupts, and/or preemption,
 * and the zpool_unmap_handle() must be called to undo those
 * actions.  The code that uses the mapped handle should complete
 * its operations on the mapped handle memory quickly and unmap
 * as soon as possible.  As the implementation may use per-cpu
 * data, multiple handles should not be mapped concurrently on
 * any cpu.
 *
 * Returns: A pointer to the handle's mapped memory area.
 */
void *zpool_map_handle(struct zpool *zpool, unsigned long handle,
			enum zpool_mapmode mapmode)
{
	return zpool->driver->map(zpool->pool, handle, mapmode);
}

/**
 * zpool_unmap_handle() - Unmap a previously mapped handle
 * @pool	The zpool that the handle was allocated from
 * @handle	The handle to unmap
 *
 * This unmaps a previously mapped handle.  Any locks or other
 * actions that the implementation took in zpool_map_handle()
 * will be undone here.  The memory area returned from
 * zpool_map_handle() should no longer be used after this.
 */
void zpool_unmap_handle(struct zpool *zpool, unsigned long handle)
{
	zpool->driver->unmap(zpool->pool, handle);
}
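
/*
 * Example: storing one buffer in a pool.  Illustrative sketch only;
 * "pool", "src" and "len" are hypothetical, error handling is
 * abbreviated, and the ZPOOL_MM_* map modes are assumed to come from
 * <linux/zpool.h>.  The mapping window is kept as short as possible
 * since the implementation may hold locks or use per-cpu data while a
 * handle is mapped.
 *
 *	unsigned long handle;
 *	char *buf;
 *
 *	if (zpool_malloc(pool, len, GFP_KERNEL, &handle))
 *		return -ENOMEM;
 *
 *	buf = zpool_map_handle(pool, handle, ZPOOL_MM_WO);
 *	memcpy(buf, src, len);
 *	zpool_unmap_handle(pool, handle);
 *
 *	// ... later, to read it back ...
 *	buf = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
 *	// ... copy out or decompress from buf ...
 *	zpool_unmap_handle(pool, handle);
 *
 *	zpool_free(pool, handle);
 */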

/**
 * zpool_get_total_size() - The total size of the pool
 * @pool	The zpool to check
 *
 * This returns the total size in bytes of the pool.
 *
 * Returns: Total size of the zpool in bytes.
 */
u64 zpool_get_total_size(struct zpool *zpool)
{
	return zpool->driver->total_size(zpool->pool);
}

static int __init init_zpool(void)
{
	pr_info("loaded\n");

	return 0;
}

static void __exit exit_zpool(void)
{
	pr_info("unloaded\n");
}

module_init(init_zpool);
module_exit(exit_zpool);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>");
MODULE_DESCRIPTION("Common API for compressed memory storage");