# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import sys

from lib.ordered_dict import OrderedDict
from lib.sorter import MallocUnit, MMapUnit, SorterSet, UnhookedUnit, UnitSet
from lib.subcommand import SubCommand
14 LOGGER
= logging
.getLogger('dmprof')
class CatCommand(SubCommand):
  """Subcommand 'cat': dumps a heap profile as a JSON document on stdout.

  The emitted JSON has the top-level keys: 'version', 'run_id', 'roots',
  'default_template', 'templates', 'orders' and 'snapshots' (one snapshot
  per loaded dump).

  NOTE(review): this block was reconstructed from a fragmentary source in
  which several statements were missing (variable initializations, branch
  headers and decorators).  The restored lines are marked below — verify
  them against the upstream dmprof 'cat' subcommand.
  """

  def __init__(self):
    super(CatCommand, self).__init__('Usage: %prog cat <first-dump>')
    self._parser.add_option('--alternative-dirs', dest='alternative_dirs',
                            metavar='/path/on/target@/path/on/host[:...]',
                            help='Read files in /path/on/host/ instead of '
                                 'files in /path/on/target/.')
    self._parser.add_option('--indent', dest='indent', action='store_true',
                            help='Indent the output.')

  def do(self, sys_argv):
    """Runs the 'cat' subcommand.

    Args:
        sys_argv: Command-line arguments; the first positional argument is
            the path of the first dump file.

    Returns:
        0 on success.
    """
    options, args = self._parse_args(sys_argv, 1)
    dump_path = args[1]  # restored: was missing from the mangled source
    # TODO(dmikurube): Support shared memory.
    alternative_dirs_dict = {}
    if options.alternative_dirs:
      # '--alternative-dirs' is a ':'-separated list of 'target@host' pairs.
      for alternative_dir_pair in options.alternative_dirs.split(':'):
        target_path, host_path = alternative_dir_pair.split('@', 1)
        alternative_dirs_dict[target_path] = host_path
    (bucket_set, dumps) = SubCommand.load_basic_files(
        dump_path, True, alternative_dirs=alternative_dirs_dict)

    # Load all sorters.
    sorters = SorterSet()  # restored: was missing from the mangled source

    json_root = OrderedDict()
    json_root['version'] = 1
    json_root['run_id'] = None
    json_root['roots'] = []
    for sorter in sorters:
      if sorter.root:  # restored branch header — only root sorters listed
        json_root['roots'].append([sorter.world, sorter.name])
    json_root['default_template'] = 'l2'
    json_root['templates'] = sorters.templates.as_dict()

    # Record, per world and per sorter, the order in which rule names
    # first appear; the viewer uses it to order breakdown categories.
    orders = OrderedDict()
    orders['worlds'] = OrderedDict()
    for world in ['vm', 'malloc']:
      orders['worlds'][world] = OrderedDict()
      orders['worlds'][world]['breakdown'] = OrderedDict()
      for sorter in sorters.iter_world(world):
        order = []  # restored: was missing from the mangled source
        for rule in sorter.iter_rule():
          if rule.name not in order:
            order.append(rule.name)
        orders['worlds'][world]['breakdown'][sorter.name] = order
    json_root['orders'] = orders

    json_root['snapshots'] = []
    for dump in dumps:  # restored loop header
      # All dumps are expected to come from the same profiling run; an
      # inconsistent run_id is reported and blanked instead of aborting.
      if json_root['run_id'] and json_root['run_id'] != dump.run_id:
        LOGGER.error('Inconsistent heap profile dumps.')
        json_root['run_id'] = ''
      else:
        json_root['run_id'] = dump.run_id

      LOGGER.info('Sorting a dump %s...' % dump.path)
      json_root['snapshots'].append(
          self._fill_snapshot(dump, bucket_set, sorters))

    if options.indent:
      json.dump(json_root, sys.stdout, indent=2)
    else:
      json.dump(json_root, sys.stdout)
    return 0

  @staticmethod
  def _fill_snapshot(dump, bucket_set, sorters):
    """Converts one dump into a JSON-ready snapshot dict (time + worlds)."""
    root = OrderedDict()  # restored: was missing from the mangled source
    root['time'] = dump.time
    root['worlds'] = OrderedDict()
    root['worlds']['vm'] = CatCommand._fill_world(
        dump, bucket_set, sorters, 'vm')
    root['worlds']['malloc'] = CatCommand._fill_world(
        dump, bucket_set, sorters, 'malloc')
    return root

  @staticmethod
  def _fill_world(dump, bucket_set, sorters, world):
    """Builds the per-world part of a snapshot.

    Args:
        dump: A dump object providing iter_map / iter_stacktrace.
        bucket_set: The set of stacktrace buckets loaded with the dumps.
        sorters: A SorterSet; only sorters of the given world are used.
        world: Either 'vm' or 'malloc'.

    Returns:
        An OrderedDict with 'name', 'unit_fields', 'units' and 'breakdown'.
    """
    root = OrderedDict()  # restored: was missing from the mangled source
    root['name'] = world
    if world == 'vm':
      root['unit_fields'] = ['size', 'reserved']
    elif world == 'malloc':
      root['unit_fields'] = ['size', 'alloc_count', 'free_count']

    # Make { vm | malloc } units with their sizes.
    root['units'] = OrderedDict()
    unit_set = UnitSet(world)
    if world == 'vm':  # restored branch header
      for unit in CatCommand._iterate_vm_unit(dump, None, bucket_set):
        unit_set.append(unit)
      for unit in unit_set:
        root['units'][unit.unit_id] = [unit.committed, unit.reserved]
    elif world == 'malloc':
      for unit in CatCommand._iterate_malloc_unit(dump, bucket_set):
        unit_set.append(unit)
      for unit in unit_set:
        root['units'][unit.unit_id] = [
            unit.size, unit.alloc_count, unit.free_count]

    # Iterate for { vm | malloc } sorters.
    root['breakdown'] = OrderedDict()
    for sorter in sorters.iter_world(world):
      LOGGER.info('  Sorting with %s:%s.' % (sorter.world, sorter.name))
      breakdown = OrderedDict()
      for rule in sorter.iter_rule():
        category = OrderedDict()
        category['name'] = rule.name
        subs = []  # restored: was missing from the mangled source
        for sub_world, sub_breakdown in rule.iter_subs():
          subs.append([sub_world, sub_breakdown])
        if subs:  # restored branch header — omit empty 'subs'
          category['subs'] = subs
        if rule.hidden:  # restored branch header — presumably rule.hidden
          category['hidden'] = True
        category['units'] = []
        breakdown[rule.name] = category
      for unit in unit_set:
        found = sorter.find(unit)
        if found:  # restored branch header
          # Note that a bucket which doesn't match any rule is just dropped.
          breakdown[found.name]['units'].append(unit.unit_id)
      root['breakdown'][sorter.name] = breakdown
    return root

  @staticmethod
  def _iterate_vm_unit(dump, pfn_dict, bucket_set):
    """Yields a unit object per mapped region (or per pageframe).

    Regions are assumed to be (status, attribute-dict) pairs where status
    is 'unhooked' or 'hooked' — reconstructed; verify against lib.dump.
    """
    unit_id = 0  # restored: was missing from the mangled source
    for _, region in dump.iter_map:
      unit_id += 1  # restored: one id per region
      if region[0] == 'unhooked':
        if pfn_dict and dump.pageframe_length:
          for pageframe in region[1]['pageframe']:
            yield UnhookedUnit(unit_id, pageframe.size, pageframe.size,
                               region, pageframe, pfn_dict)
        else:
          yield UnhookedUnit(unit_id,
                             int(region[1]['committed']),
                             int(region[1]['reserved']),
                             region)  # restored trailing argument
      elif region[0] == 'hooked':
        if pfn_dict and dump.pageframe_length:
          for pageframe in region[1]['pageframe']:
            yield MMapUnit(unit_id,
                           pageframe.size, pageframe.size,  # restored
                           region, bucket_set, pageframe, pfn_dict)
        else:
          yield MMapUnit(unit_id,
                         int(region[1]['committed']),
                         int(region[1]['reserved']),
                         region, bucket_set)  # restored trailing arguments
      else:
        LOGGER.error('Unrecognized mapping status: %s' % region[0])

  @staticmethod
  def _iterate_malloc_unit(dump, bucket_set):
    """Yields a MallocUnit per malloc stacktrace entry in the dump."""
    for bucket_id, _, committed, allocs, frees in dump.iter_stacktrace:
      bucket = bucket_set.get(bucket_id)
      if bucket and bucket.allocator_type == 'malloc':
        yield MallocUnit(bucket_id, committed, allocs, frees, bucket)
      elif not bucket:  # restored branch header — TODO confirm condition
        # 'Not-found' buckets are all assumed as malloc buckets.
        yield MallocUnit(bucket_id, committed, allocs, frees, None)