/*
 * CMA DebugFS Interface
 *
 * Copyright (c) 2015 Sasha Levin <sasha.levin@oracle.com>
 */
#include <linux/debugfs.h>
#include <linux/cma.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm_types.h>

#include "cma.h"
18 struct hlist_node node
;
/* Top-level "cma" directory in debugfs; parent of the per-area dirs. */
static struct dentry *cma_debugfs_root;
25 static int cma_debugfs_get(void *data
, u64
*val
)
27 unsigned long *p
= data
;
33 DEFINE_SIMPLE_ATTRIBUTE(cma_debugfs_fops
, cma_debugfs_get
, NULL
, "%llu\n");
35 static int cma_used_get(void *data
, u64
*val
)
37 struct cma
*cma
= data
;
40 mutex_lock(&cma
->lock
);
41 /* pages counter is smaller than sizeof(int) */
42 used
= bitmap_weight(cma
->bitmap
, (int)cma
->count
);
43 mutex_unlock(&cma
->lock
);
44 *val
= (u64
)used
<< cma
->order_per_bit
;
48 DEFINE_SIMPLE_ATTRIBUTE(cma_used_fops
, cma_used_get
, NULL
, "%llu\n");
50 static int cma_maxchunk_get(void *data
, u64
*val
)
52 struct cma
*cma
= data
;
53 unsigned long maxchunk
= 0;
54 unsigned long start
, end
= 0;
56 mutex_lock(&cma
->lock
);
58 start
= find_next_zero_bit(cma
->bitmap
, cma
->count
, end
);
59 if (start
>= cma
->count
)
61 end
= find_next_bit(cma
->bitmap
, cma
->count
, start
);
62 maxchunk
= max(end
- start
, maxchunk
);
64 mutex_unlock(&cma
->lock
);
65 *val
= (u64
)maxchunk
<< cma
->order_per_bit
;
69 DEFINE_SIMPLE_ATTRIBUTE(cma_maxchunk_fops
, cma_maxchunk_get
, NULL
, "%llu\n");
71 static void cma_add_to_cma_mem_list(struct cma
*cma
, struct cma_mem
*mem
)
73 spin_lock(&cma
->mem_head_lock
);
74 hlist_add_head(&mem
->node
, &cma
->mem_head
);
75 spin_unlock(&cma
->mem_head_lock
);
78 static struct cma_mem
*cma_get_entry_from_list(struct cma
*cma
)
80 struct cma_mem
*mem
= NULL
;
82 spin_lock(&cma
->mem_head_lock
);
83 if (!hlist_empty(&cma
->mem_head
)) {
84 mem
= hlist_entry(cma
->mem_head
.first
, struct cma_mem
, node
);
85 hlist_del_init(&mem
->node
);
87 spin_unlock(&cma
->mem_head_lock
);
92 static int cma_free_mem(struct cma
*cma
, int count
)
94 struct cma_mem
*mem
= NULL
;
97 mem
= cma_get_entry_from_list(cma
);
101 if (mem
->n
<= count
) {
102 cma_release(cma
, mem
->p
, mem
->n
);
105 } else if (cma
->order_per_bit
== 0) {
106 cma_release(cma
, mem
->p
, count
);
110 cma_add_to_cma_mem_list(cma
, mem
);
112 pr_debug("cma: cannot release partial block when order_per_bit != 0\n");
113 cma_add_to_cma_mem_list(cma
, mem
);
122 static int cma_free_write(void *data
, u64 val
)
125 struct cma
*cma
= data
;
127 return cma_free_mem(cma
, pages
);
129 DEFINE_SIMPLE_ATTRIBUTE(cma_free_fops
, NULL
, cma_free_write
, "%llu\n");
131 static int cma_alloc_mem(struct cma
*cma
, int count
)
136 mem
= kzalloc(sizeof(*mem
), GFP_KERNEL
);
140 p
= cma_alloc(cma
, count
, 0);
149 cma_add_to_cma_mem_list(cma
, mem
);
154 static int cma_alloc_write(void *data
, u64 val
)
157 struct cma
*cma
= data
;
159 return cma_alloc_mem(cma
, pages
);
161 DEFINE_SIMPLE_ATTRIBUTE(cma_alloc_fops
, NULL
, cma_alloc_write
, "%llu\n");
163 static void cma_debugfs_add_one(struct cma
*cma
, int idx
)
169 sprintf(name
, "cma-%d", idx
);
171 tmp
= debugfs_create_dir(name
, cma_debugfs_root
);
173 debugfs_create_file("alloc", S_IWUSR
, cma_debugfs_root
, cma
,
176 debugfs_create_file("free", S_IWUSR
, cma_debugfs_root
, cma
,
179 debugfs_create_file("base_pfn", S_IRUGO
, tmp
,
180 &cma
->base_pfn
, &cma_debugfs_fops
);
181 debugfs_create_file("count", S_IRUGO
, tmp
,
182 &cma
->count
, &cma_debugfs_fops
);
183 debugfs_create_file("order_per_bit", S_IRUGO
, tmp
,
184 &cma
->order_per_bit
, &cma_debugfs_fops
);
185 debugfs_create_file("used", S_IRUGO
, tmp
, cma
, &cma_used_fops
);
186 debugfs_create_file("maxchunk", S_IRUGO
, tmp
, cma
, &cma_maxchunk_fops
);
188 u32s
= DIV_ROUND_UP(cma_bitmap_maxno(cma
), BITS_PER_BYTE
* sizeof(u32
));
189 debugfs_create_u32_array("bitmap", S_IRUGO
, tmp
, (u32
*)cma
->bitmap
, u32s
);
192 static int __init
cma_debugfs_init(void)
196 cma_debugfs_root
= debugfs_create_dir("cma", NULL
);
197 if (!cma_debugfs_root
)
200 for (i
= 0; i
< cma_area_count
; i
++)
201 cma_debugfs_add_one(&cma_areas
[i
], i
);
205 late_initcall(cma_debugfs_init
);