lib/sg_pool.c

#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/mempool.h>
#include <linux/slab.h>

#define SG_MEMPOOL_NR		ARRAY_SIZE(sg_pools)
#define SG_MEMPOOL_SIZE		2
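
/*
 * One slab cache plus mempool per supported scatterlist size; each
 * mempool keeps SG_MEMPOOL_SIZE elements in reserve so allocations can
 * still make progress under memory pressure.
 */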
struct sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { .size = x, "sgpool-" __stringify(x) }
#if (SG_CHUNK_SIZE < 32)
#error SG_CHUNK_SIZE is too small (must be 32 or greater)
#endif
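
/*
 * SP(x) relies on the positional initializer that follows ".size = x"
 * continuing with the next member, so "sgpool-<x>" fills in .name; the
 * slab and mempool pointers are set up at init time.  Only the sizes up
 * to SG_CHUNK_SIZE are compiled in.
 */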
static struct sg_pool sg_pools[] = {
	SP(8),
	SP(16),
#if (SG_CHUNK_SIZE > 32)
	SP(32),
#if (SG_CHUNK_SIZE > 64)
	SP(64),
#if (SG_CHUNK_SIZE > 128)
	SP(128),
#if (SG_CHUNK_SIZE > 256)
#error SG_CHUNK_SIZE is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SG_CHUNK_SIZE)
};
#undef SP
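
/*
 * Map a segment count onto the smallest pool that can hold it:
 * 1..8 entries use sgpool-8 (index 0), 9..16 use sgpool-16 (index 1),
 * 17..32 use sgpool-32, and so on up to SG_CHUNK_SIZE.
 * get_count_order() rounds nents up to the next power of two; the -3
 * skips the orders below 8.
 */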
static inline unsigned int sg_pool_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SG_CHUNK_SIZE);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
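
/*
 * Allocation and release callbacks handed to __sg_alloc_table() and
 * __sg_free_table(); each chunk of up to SG_CHUNK_SIZE entries comes
 * from the smallest mempool that fits it.
 */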
static void sg_pool_free(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_pool *sgp;

	sgp = sg_pools + sg_pool_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *sg_pool_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct sg_pool *sgp;

	sgp = sg_pools + sg_pool_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}
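
/*
 * Note on the chained helpers below: when the caller supplied a
 * first_chunk large enough for the whole list, nothing is drawn from
 * the pools on allocation, so sg_free_table_chained() has nothing to
 * release in that case.
 */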

/**
 * sg_free_table_chained - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @first_chunk: was first_chunk not NULL in sg_alloc_table_chained?
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    sg_alloc_table_chained().
 *
 **/
void sg_free_table_chained(struct sg_table *table, bool first_chunk)
{
	if (first_chunk && table->orig_nents <= SG_CHUNK_SIZE)
		return;
	__sg_free_table(table, SG_CHUNK_SIZE, first_chunk, sg_pool_free);
}
EXPORT_SYMBOL_GPL(sg_free_table_chained);

/**
 * sg_alloc_table_chained - Allocate and chain SGLs in an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @first_chunk: first SGL
 *
 *  Description:
 *    Allocate and chain SGLs in an sg table. If @nents@ is larger than
 *    SG_CHUNK_SIZE a chained sg table will be setup.
 *
 **/
int sg_alloc_table_chained(struct sg_table *table, int nents,
		struct scatterlist *first_chunk)
{
	int ret;

	BUG_ON(!nents);

	if (first_chunk) {
		if (nents <= SG_CHUNK_SIZE) {
			table->nents = table->orig_nents = nents;
			sg_init_table(table->sgl, nents);
			return 0;
		}
	}

	ret = __sg_alloc_table(table, nents, SG_CHUNK_SIZE,
			       first_chunk, GFP_ATOMIC, sg_pool_alloc);
	if (unlikely(ret))
		sg_free_table_chained(table, (bool)first_chunk);
	return ret;
}
EXPORT_SYMBOL_GPL(sg_alloc_table_chained);
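
/*
 * Example pairing of the two exported helpers (an illustrative sketch,
 * not part of this file; "rq" and "inline_sgl" are hypothetical names).
 * The first chunk needs room for up to SG_CHUNK_SIZE entries (or the
 * caller's maximum segment count, if smaller) and normally lives in
 * per-request data rather than on the stack; table->sgl must point at
 * it so the short-list fast path can initialize it in place:
 *
 *	rq->table.sgl = rq->inline_sgl;
 *	if (sg_alloc_table_chained(&rq->table, nents, rq->inline_sgl))
 *		return -ENOMEM;
 *	...
 *	sg_free_table_chained(&rq->table, true);
 */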

static __init int sg_pool_init(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct sg_pool *sgp = sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SG_POOL: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SG_POOL: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct sg_pool *sgp = sg_pools + i;

		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}

	return -ENOMEM;
}

static __exit void sg_pool_exit(void)
{
	int i;

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct sg_pool *sgp = sg_pools + i;

		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

module_init(sg_pool_init);
module_exit(sg_pool_exit);