#!/usr/bin/env drgn
#
# Copyright (C) 2020 Roman Gushchin <guro@fb.com>
# Copyright (C) 2020 Facebook

from os import stat
import argparse
import sys

from drgn.helpers.linux import list_for_each_entry, list_empty
from drgn.helpers.linux import for_each_page
from drgn.helpers.linux.cpumask import for_each_online_cpu
from drgn.helpers.linux.percpu import per_cpu_ptr
from drgn import container_of, FaultError, Object, cast


DESC = """
This is a drgn script to provide slab statistics for memory cgroups.
It supports cgroup v2 and v1 and can emulate the memory.kmem.slabinfo
interface of cgroup v1.
For drgn, visit https://github.com/osandov/drgn.
"""
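
# Example invocation (the cgroup path is illustrative):
#   ./memcg_slabinfo.py /sys/fs/cgroup/system.slice
# The positional argument is a cgroup directory; its inode number is used
# to look up the corresponding mem_cgroup.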


MEMCGS = {}

OO_SHIFT = 16
OO_MASK = ((1 << OO_SHIFT) - 1)


def err(s):
    print('slabinfo.py: error: %s' % s, file=sys.stderr, flush=True)
    sys.exit(1)


def find_memcg_ids(css=prog['root_mem_cgroup'].css, prefix=''):
    # recursively walk the cgroup tree and record each mem_cgroup,
    # keyed by the kernfs node id of its cgroup directory
    if not list_empty(css.children.address_of_()):
        for css in list_for_each_entry('struct cgroup_subsys_state',
                                       css.children.address_of_(),
                                       'sibling'):
            name = prefix + '/' + css.cgroup.kn.name.string_().decode('utf-8')
            memcg = container_of(css, 'struct mem_cgroup', 'css')
            MEMCGS[css.cgroup.kn.id.value_()] = memcg
            find_memcg_ids(css, name)


def is_root_cache(s):
    try:
        return False if s.memcg_params.root_cache else True
    except AttributeError:
        return True


def cache_name(s):
    if is_root_cache(s):
        return s.name.string_().decode('utf-8')
    else:
        return s.memcg_params.root_cache.name.string_().decode('utf-8')


# SLUB
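
# kmem_cache.oo (struct kmem_cache_order_objects) packs the slab page order
# and the number of objects per slab into one word: the order in the high
# bits and the object count in the low OO_SHIFT (16) bits.  For example,
# oo.x == (1 << 16) | 32 describes an order-1 slab holding 32 objects.
# The two helpers below mirror the kernel's oo_order()/oo_objects().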


def oo_order(s):
    return s.oo.x >> OO_SHIFT


def oo_objects(s):
    return s.oo.x & OO_MASK


def count_partial(n, fn):
    nr_objs = 0
    for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
                                    'slab_list'):
        nr_objs += fn(slab)
    return nr_objs


def count_free(slab):
    return slab.objects - slab.inuse
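
# This follows the same approach as the kernel's SLUB get_slabinfo(): slab and
# object totals come from the per-node counters, while free objects are only
# counted on the per-node partial lists, so 'active_objs' is an approximation,
# as it is in /proc/slabinfo.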


def slub_get_slabinfo(s, cfg):
    nr_slabs = 0
    nr_objs = 0
    nr_free = 0

    for node in range(cfg['nr_nodes']):
        n = s.node[node]
        nr_slabs += n.nr_slabs.counter.value_()
        nr_objs += n.total_objects.counter.value_()
        nr_free += count_partial(n, count_free)

    return {'active_objs': nr_objs - nr_free,
            'num_objs': nr_objs,
            'active_slabs': nr_slabs,
            'num_slabs': nr_slabs,
            'objects_per_slab': oo_objects(s),
            'cache_order': oo_order(s),
            'limit': 0,
            'batchcount': 0,
            'shared': 0,
            'shared_avail': 0}
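
# Print one /proc/slabinfo-style line for the cache.  The tunables and shared
# fields are reported as zero, matching what SLUB exposes in /proc/slabinfo.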


def cache_show(s, cfg, objs):
    if cfg['allocator'] == 'SLUB':
        sinfo = slub_get_slabinfo(s, cfg)
    else:
        err('SLAB isn\'t supported yet')

    if cfg['shared_slab_pages']:
        sinfo['active_objs'] = objs
        sinfo['num_objs'] = objs

    print('%-17s %6lu %6lu %6u %4u %4d'
          ' : tunables %4u %4u %4u'
          ' : slabdata %6lu %6lu %6lu' % (
          cache_name(s), sinfo['active_objs'], sinfo['num_objs'],
          s.size, sinfo['objects_per_slab'], 1 << sinfo['cache_order'],
          sinfo['limit'], sinfo['batchcount'], sinfo['shared'],
          sinfo['active_slabs'], sinfo['num_slabs'],
          sinfo['shared_avail']))
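
# Probe the running kernel: the second member of struct kmem_cache
# distinguishes SLUB ('flags') from SLAB ('batchcount'), and the presence of
# struct obj_cgroup indicates per-object cgroup accounting, i.e. slab pages
# shared between cgroups.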


def detect_kernel_config():
    cfg = {}

    cfg['nr_nodes'] = prog['nr_online_nodes'].value_()

    if prog.type('struct kmem_cache').members[1].name == 'flags':
        cfg['allocator'] = 'SLUB'
    elif prog.type('struct kmem_cache').members[1].name == 'batchcount':
        cfg['allocator'] = 'SLAB'
    else:
        err('Can\'t determine the slab allocator')

    cfg['shared_slab_pages'] = False
    try:
        if prog.type('struct obj_cgroup'):
            cfg['shared_slab_pages'] = True
    except:
        pass

    return cfg
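
# Page types are stored negated in page->page_type, so a page that is a slab
# page has page_type equal to ~PG_slab; FaultError is caught to skip holes in
# the memory map while iterating over all pages.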


def for_each_slab(prog):
    PGSlab = ~prog.constant('PG_slab')

    for page in for_each_page(prog):
        try:
            if page.page_type.value_() == PGSlab:
                yield cast('struct slab *', page)
        except FaultError:
            pass


def main():
    parser = argparse.ArgumentParser(description=DESC,
                                     formatter_class=
                                     argparse.RawTextHelpFormatter)
    parser.add_argument('cgroup', metavar='CGROUP',
                        help='Target memory cgroup')
    args = parser.parse_args()

    try:
        # the inode number of a cgroup directory is its kernfs node id,
        # which is what find_memcg_ids() keys MEMCGS by
        cgroup_id = stat(args.cgroup).st_ino
        find_memcg_ids()
        memcg = MEMCGS[cgroup_id]
    except KeyError:
        err('Can\'t find the memory cgroup')

    cfg = detect_kernel_config()

    print('# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>'
          ' : tunables <limit> <batchcount> <sharedfactor>'
          ' : slabdata <active_slabs> <num_slabs> <sharedavail>')

    if cfg['shared_slab_pages']:
        obj_cgroups = set()
        stats = {}
        caches = {}

        # find memcg pointers belonging to the specified cgroup
        obj_cgroups.add(memcg.objcg.value_())
        for ptr in list_for_each_entry('struct obj_cgroup',
                                       memcg.objcg_list.address_of_(),
                                       'list'):
            obj_cgroups.add(ptr.value_())

        # walk all slab folios and count objects belonging to the given
        # memory cgroup
        for slab in for_each_slab(prog):
            objcg_vec_raw = slab.memcg_data.value_()
            if objcg_vec_raw == 0:
                continue
            cache = slab.slab_cache
            if not cache:
                continue
            addr = cache.value_()
            caches[addr] = cache
            # the lowest bit is a flag; clear it to get the pointer to the
            # obj_cgroup vector
            objcg_vec = Object(prog, 'struct obj_cgroup **',
                               value=objcg_vec_raw & ~1)

            if addr not in stats:
                stats[addr] = 0

            for i in range(oo_objects(cache)):
                if objcg_vec[i].value_() in obj_cgroups:
                    stats[addr] += 1

        for addr in caches:
            if stats[addr] > 0:
                cache_show(caches[addr], cfg, stats[addr])

    else:
        for s in list_for_each_entry('struct kmem_cache',
                                     memcg.kmem_caches.address_of_(),
                                     'memcg_params.kmem_caches_node'):
            cache_show(s, cfg, None)


main()