/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>
#include <asm/debugfs.h>
#include <asm/plpar_wrappers.h>
#include <asm/machdep.h>
struct dtl {
        struct dtl_entry        *buf;
        struct dentry           *file;
        int                     cpu;
        int                     buf_entries;
        u64                     last_idx;
        spinlock_t              lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);
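/*
 * Each possible cpu gets one of the above: the debugfs file, the log
 * buffer and the reader's position (last_idx) all live in its cpu_dtl
 * slot, with dtl->lock serialising enable/disable against reads.
 */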
/*
 * Dispatch trace log event mask:
 * 0x7: 0x1: voluntary virtual processor waits
 *      0x2: time-slice preempts
 *      0x4: virtual partition memory page faults
 */
static u8 dtl_event_mask = 0x7;

/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static int dtl_buf_entries = N_DISPATCH_LOG;
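/*
 * For scale (values from asm/lppaca.h at the time of writing):
 * DISPATCH_LOG_BYTES is 4096 and a struct dtl_entry is 48 bytes, so
 * N_DISPATCH_LOG works out to 85 entries, keeping each buffer within a
 * single 4k page as required.
 */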
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
struct dtl_ring {
        u64     write_index;
        struct dtl_entry *write_ptr;
        struct dtl_entry *buf;
        struct dtl_entry *buf_end;
        u8      saved_dtl_mask;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;
/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
        struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
        struct dtl_entry *wp = dtlr->write_ptr;
        struct lppaca *vpa = local_paca->lppaca_ptr;

        if (!wp)
                return;

        *wp = *dtle;
        barrier();

        /* check for hypervisor ring buffer overflow, ignore this entry if so */
        if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
                return;

        ++wp;
        if (wp == dtlr->buf_end)
                wp = dtlr->buf;
        dtlr->write_ptr = wp;

        /* incrementing write_index makes the new entry visible */
        smp_wmb();
        ++dtlr->write_index;
}
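/*
 * Note that dtlr->write_ptr doubles as the enable flag for this
 * consumer: consume_dtle() bails out while it is NULL, so start/stop
 * below toggle logging simply by publishing or clearing the pointer,
 * with smp_wmb() ordering the buffer fields against that publication.
 */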
static int dtl_start(struct dtl *dtl)
{
        struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

        dtlr->buf = dtl->buf;
        dtlr->buf_end = dtl->buf + dtl->buf_entries;
        dtlr->write_index = 0;

        /* setting write_ptr enables logging into our buffer */
        smp_wmb();
        dtlr->write_ptr = dtl->buf;

        /* enable event logging */
        dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
        lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

        dtl_consumer = consume_dtle;
        atomic_inc(&dtl_count);
        return 0;
}
static void dtl_stop(struct dtl *dtl)
{
        struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

        dtlr->write_ptr = NULL;
        smp_wmb();

        dtlr->buf = NULL;

        /* restore dtl_enable_mask */
        lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;

        if (atomic_dec_and_test(&dtl_count))
                dtl_consumer = NULL;
}
static u64 dtl_current_index(struct dtl *dtl)
{
        return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
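/*
 * Without the native accounting consumer, the hypervisor writes
 * dispatch entries directly into our buffer, so start/stop reduce to
 * (un)registering that buffer with the HV (register_dtl() and
 * unregister_dtl() wrap the relevant H_REGISTER_VPA subfunctions).
 */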
static int dtl_start(struct dtl *dtl)
{
        unsigned long addr;
        int ret, hwcpu;

        /* Register our dtl buffer with the hypervisor. The HV expects the
         * buffer size to be passed in the second word of the buffer */
        ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;

        hwcpu = get_hard_smp_processor_id(dtl->cpu);
        addr = __pa(dtl->buf);
        ret = register_dtl(hwcpu, addr);
        if (ret) {
                printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
                       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
                return -EIO;
        }

        /* set our initial buffer indices */
        lppaca_of(dtl->cpu).dtl_idx = 0;

        /* ensure that our updates to the lppaca fields have occurred before
         * we actually enable the logging */
        smp_wmb();

        /* enable event logging */
        lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

        return 0;
}
static void dtl_stop(struct dtl *dtl)
{
        int hwcpu = get_hard_smp_processor_id(dtl->cpu);

        lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

        unregister_dtl(hwcpu);
}
static u64 dtl_current_index(struct dtl *dtl)
{
        return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static int dtl_enable(struct dtl *dtl)
{
        long int n_entries;
        long int rc;
        struct dtl_entry *buf = NULL;

        if (!dtl_cache)
                return -ENOMEM;

        /* only allow one reader */
        if (dtl->buf)
                return -EBUSY;

        n_entries = dtl_buf_entries;
        buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
        if (!buf) {
                printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
                                __func__, dtl->cpu);
                return -ENOMEM;
        }

        spin_lock(&dtl->lock);
        rc = -EBUSY;
        if (!dtl->buf) {
                /* store the original allocation size for use during read */
                dtl->buf_entries = n_entries;
                dtl->buf = buf;
                dtl->last_idx = 0;
                rc = dtl_start(dtl);
                if (rc)
                        dtl->buf = NULL;
        }
        spin_unlock(&dtl->lock);

        if (rc)
                kmem_cache_free(dtl_cache, buf);
        return rc;
}
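/*
 * A design note on the above: the buffer is allocated before dtl->lock
 * is taken because GFP_KERNEL allocations may sleep, which is not
 * permitted under a spinlock; the !dtl->buf check is then repeated
 * under the lock, and a racing loser frees its unused buffer.
 */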
static void dtl_disable(struct dtl *dtl)
{
        spin_lock(&dtl->lock);
        dtl_stop(dtl);
        kmem_cache_free(dtl_cache, dtl->buf);
        dtl->buf = NULL;
        dtl->buf_entries = 0;
        spin_unlock(&dtl->lock);
}
/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
        struct dtl *dtl = inode->i_private;
        int rc;

        rc = dtl_enable(dtl);
        if (rc)
                return rc;

        filp->private_data = dtl;
        return 0;
}
static int dtl_file_release(struct inode *inode, struct file *filp)
{
        struct dtl *dtl = inode->i_private;
        dtl_disable(dtl);
        return 0;
}
static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
                loff_t *pos)
{
        long int rc, n_read, n_req, read_size;
        struct dtl *dtl;
        u64 cur_idx, last_idx, i;

        if ((len % sizeof(struct dtl_entry)) != 0)
                return -EINVAL;

        dtl = filp->private_data;

        /* requested number of entries to read */
        n_req = len / sizeof(struct dtl_entry);

        /* actual number of entries read */
        n_read = 0;

        spin_lock(&dtl->lock);

        cur_idx = dtl_current_index(dtl);
        last_idx = dtl->last_idx;

        /* if the reader has fallen a whole buffer behind, the oldest
         * entries have been overwritten; skip forward to the oldest
         * entry that is still valid */
        if (last_idx + dtl->buf_entries <= cur_idx)
                last_idx = cur_idx - dtl->buf_entries + 1;

        if (last_idx + n_req > cur_idx)
                n_req = cur_idx - last_idx;

        if (n_req > 0)
                dtl->last_idx = last_idx + n_req;

        spin_unlock(&dtl->lock);

        if (n_req <= 0)
                return 0;
        i = last_idx % dtl->buf_entries;

        /* read the tail of the buffer if we've wrapped */
        if (i + n_req > dtl->buf_entries) {
                read_size = dtl->buf_entries - i;

                rc = copy_to_user(buf, &dtl->buf[i],
                                read_size * sizeof(struct dtl_entry));
                if (rc)
                        return -EFAULT;

                i = 0;
                n_req -= read_size;
                n_read += read_size;
                buf += read_size * sizeof(struct dtl_entry);
        }

        /* .. and now the head */
        rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
        if (rc)
                return -EFAULT;

        n_read += n_req;

        return n_read * sizeof(struct dtl_entry);
}
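/*
 * Worked example of the wrap handling above (hypothetical numbers):
 * with buf_entries = 85, last_idx = 80 and n_req = 10, i starts at 80,
 * the first copy_to_user() moves entries 80..84 (read_size = 5), and
 * the second moves entries 0..4 from the head of the ring.
 */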
static const struct file_operations dtl_fops = {
        .open           = dtl_file_open,
        .release        = dtl_file_release,
        .read           = dtl_file_read,
        .llseek         = no_llseek,
};
static struct dentry *dtl_dir;

static int dtl_setup_file(struct dtl *dtl)
{
        char name[10];

        sprintf(name, "cpu-%d", dtl->cpu);

        dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
        if (!dtl->file)
                return -ENOMEM;

        return 0;
}
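/*
 * With debugfs mounted in the usual place, each log then appears as
 * /sys/kernel/debug/powerpc/dtl/cpu-N. Opening a file enables logging
 * on that cpu, releasing it disables logging, and reads must be a
 * multiple of sizeof(struct dtl_entry).
 */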
static int dtl_init(void)
{
        struct dentry *event_mask_file, *buf_entries_file;
        int rc, i;

        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return -ENODEV;

        /* set up common debugfs structure */

        rc = -ENOMEM;
        dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
        if (!dtl_dir) {
                printk(KERN_WARNING "%s: can't create dtl root dir\n",
                                __func__);
                goto err;
        }

        event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
                                dtl_dir, &dtl_event_mask);
        buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
                                dtl_dir, &dtl_buf_entries);

        if (!event_mask_file || !buf_entries_file) {
                printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
                goto err_remove_dir;
        }

        /* set up the per-cpu log structures */
        for_each_possible_cpu(i) {
                struct dtl *dtl = &per_cpu(cpu_dtl, i);
                spin_lock_init(&dtl->lock);
                dtl->cpu = i;

                rc = dtl_setup_file(dtl);
                if (rc)
                        goto err_remove_dir;
        }

        return 0;

err_remove_dir:
        debugfs_remove_recursive(dtl_dir);
err:
        return rc;
}
machine_arch_initcall(pseries, dtl_init);