# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import sys

from lib.bucket import BUCKET_ID, COMMITTED, ALLOC_COUNT, FREE_COUNT
from lib.ordered_dict import OrderedDict
from lib.subcommand import SubCommand
from lib.sorter import MallocUnit, MMapUnit, SorterSet, UnhookedUnit, UnitSet


LOGGER = logging.getLogger('dmprof')


class CatCommand(SubCommand):
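  """Implementation of the 'cat' subcommand.

  It prints the whole content of heap profile dumps to stdout as a JSON
  tree of snapshots, broken down by the loaded sorters.
  """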
  def __init__(self):
    super(CatCommand, self).__init__('Usage: %prog cat <first-dump>')
    self._parser.add_option('--alternative-dirs', dest='alternative_dirs',
                            metavar='/path/on/target@/path/on/host[:...]',
                            help='Read files in /path/on/host/ instead of '
                                 'files in /path/on/target/.')
    self._parser.add_option('--indent', dest='indent', action='store_true',
                            help='Indent the output.')

  def do(self, sys_argv):
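    """Reads the given dump files and prints them to stdout as JSON."""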
    options, args = self._parse_args(sys_argv, 1)
    dump_path = args[1]
    # TODO(dmikurube): Support shared memory.
    alternative_dirs_dict = {}
    if options.alternative_dirs:
      for alternative_dir_pair in options.alternative_dirs.split(':'):
        target_path, host_path = alternative_dir_pair.split('@', 1)
        alternative_dirs_dict[target_path] = host_path
    (bucket_set, dumps) = SubCommand.load_basic_files(
        dump_path, True, alternative_dirs=alternative_dirs_dict)

    # Load all sorters.
    sorters = SorterSet()

    json_root = OrderedDict()
    json_root['version'] = 1
    json_root['run_id'] = None
    json_root['roots'] = []
    for sorter in sorters:
      if sorter.root:
        json_root['roots'].append([sorter.world, sorter.name])
    json_root['default_template'] = 'l2'
    json_root['templates'] = sorters.templates.as_dict()

    orders = OrderedDict()
    orders['worlds'] = OrderedDict()
    for world in ['vm', 'malloc']:
      orders['worlds'][world] = OrderedDict()
      orders['worlds'][world]['breakdown'] = OrderedDict()
      for sorter in sorters.iter_world(world):
        order = []
        for rule in sorter.iter_rule():
          if rule.name not in order:
            order.append(rule.name)
        orders['worlds'][world]['breakdown'][sorter.name] = order
    json_root['orders'] = orders

    json_root['snapshots'] = []
    for dump in dumps:
      if json_root['run_id'] and json_root['run_id'] != dump.run_id:
        LOGGER.error('Inconsistent heap profile dumps.')
        json_root['run_id'] = ''
      else:
        json_root['run_id'] = dump.run_id

      LOGGER.info('Sorting a dump %s...' % dump.path)
      json_root['snapshots'].append(
          self._fill_snapshot(dump, bucket_set, sorters))

    if options.indent:
      json.dump(json_root, sys.stdout, indent=2)
    else:
      json.dump(json_root, sys.stdout)
    return 0

  @staticmethod
  def _fill_snapshot(dump, bucket_set, sorters):
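    """Returns a JSON object which represents one snapshot (one dump)."""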
    root = OrderedDict()
    root['time'] = dump.time
    root['worlds'] = OrderedDict()
    root['worlds']['vm'] = CatCommand._fill_world(
        dump, bucket_set, sorters, 'vm')
    root['worlds']['malloc'] = CatCommand._fill_world(
        dump, bucket_set, sorters, 'malloc')
    return root

  @staticmethod
  def _fill_world(dump, bucket_set, sorters, world):
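    """Returns a JSON object for one world ('vm' or 'malloc') of a dump."""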
    root = OrderedDict()

    root['name'] = world
    if world == 'vm':
      root['unit_fields'] = ['size', 'reserved']
    elif world == 'malloc':
      root['unit_fields'] = ['size', 'alloc_count', 'free_count']

    # Make { vm | malloc } units with their sizes.
    root['units'] = OrderedDict()
    unit_set = UnitSet(world)
    if world == 'vm':
      for unit in CatCommand._iterate_vm_unit(dump, None, bucket_set):
        unit_set.append(unit)
      for unit in unit_set:
        root['units'][unit.unit_id] = [unit.committed, unit.reserved]
    elif world == 'malloc':
      for unit in CatCommand._iterate_malloc_unit(dump, bucket_set):
        unit_set.append(unit)
      for unit in unit_set:
        root['units'][unit.unit_id] = [
            unit.size, unit.alloc_count, unit.free_count]

    # Iterate for { vm | malloc } sorters.
    root['breakdown'] = OrderedDict()
    for sorter in sorters.iter_world(world):
      LOGGER.info('  Sorting with %s:%s.' % (sorter.world, sorter.name))
      breakdown = OrderedDict()
      for rule in sorter.iter_rule():
        category = OrderedDict()
        category['name'] = rule.name
        subs = []
        for sub_world, sub_breakdown in rule.iter_subs():
          subs.append([sub_world, sub_breakdown])
        if subs:
          category['subs'] = subs
        if rule.hidden:
          category['hidden'] = True
        category['units'] = []
        breakdown[rule.name] = category
      for unit in unit_set:
        found = sorter.find(unit)
        if found:
          # Note that a bucket which doesn't match any rule is just dropped.
          breakdown[found.name]['units'].append(unit.unit_id)
      root['breakdown'][sorter.name] = breakdown

    return root

  @staticmethod
  def _iterate_vm_unit(dump, pfn_dict, bucket_set):
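    """Yields a unit for every virtual memory region in a dump.

    Note that 'pfn_dict' may be None; a per-pageframe breakdown is yielded
    only when both 'pfn_dict' and page-frame data in the dump are available.
    """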
    unit_id = 0
    for _, region in dump.iter_map:
      unit_id += 1
      if region[0] == 'unhooked':
        if pfn_dict and dump.pageframe_length:
          for pageframe in region[1]['pageframe']:
            yield UnhookedUnit(unit_id, pageframe.size, pageframe.size,
                               region, pageframe, pfn_dict)
        else:
          yield UnhookedUnit(unit_id,
                             int(region[1]['committed']),
                             int(region[1]['reserved']),
                             region)
      elif region[0] == 'hooked':
        if pfn_dict and dump.pageframe_length:
          for pageframe in region[1]['pageframe']:
            yield MMapUnit(unit_id,
                           pageframe.size,
                           pageframe.size,
                           region, bucket_set, pageframe, pfn_dict)
        else:
          yield MMapUnit(unit_id,
                         int(region[1]['committed']),
                         int(region[1]['reserved']),
                         region, bucket_set)
      else:
        LOGGER.error('Unrecognized mapping status: %s' % region[0])

  @staticmethod
  def _iterate_malloc_unit(dump, bucket_set):
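    """Yields a malloc unit for every stacktrace line in a dump."""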
    for line in dump.iter_stacktrace:
      words = line.split()
      bucket = bucket_set.get(int(words[BUCKET_ID]))
      if bucket and bucket.allocator_type == 'malloc':
        yield MallocUnit(int(words[BUCKET_ID]),
                         int(words[COMMITTED]),
                         int(words[ALLOC_COUNT]),
                         int(words[FREE_COUNT]),
                         bucket)
      elif not bucket:
        # 'Not-found' buckets are all assumed as malloc buckets.
        yield MallocUnit(int(words[BUCKET_ID]),
                         int(words[COMMITTED]),
                         int(words[ALLOC_COUNT]),
                         int(words[FREE_COUNT]),
                         None)