arch/powerpc/platforms/pseries/dtl.c

/*
 * Virtual Processor Dispatch Trace Log
 *
 * (C) Copyright IBM Corporation 2009
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/spinlock.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
#include <asm/lppaca.h>

#include "plpar_wrappers.h"
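
/*
 * Per-cpu state for the debugfs interface: the log buffer itself, the
 * debugfs file exposing it, and the index of the last entry handed to
 * the reader. The lock serialises enable/disable against reads.
 */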
struct dtl {
	struct dtl_entry	*buf;
	struct dentry		*file;
	int			cpu;
	int			buf_entries;
	u64			last_idx;
	spinlock_t		lock;
};
static DEFINE_PER_CPU(struct dtl, cpu_dtl);

/*
 * Dispatch trace log event mask:
 * 0x7: 0x1: voluntary virtual processor waits
 *      0x2: time-slice preempts
 *      0x4: virtual partition memory page faults
 */
static u8 dtl_event_mask = 0x7;

/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static u32 dtl_buf_entries = N_DISPATCH_LOG;

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
struct dtl_ring {
	u64	write_index;
	struct dtl_entry *write_ptr;
	struct dtl_entry *buf;
	struct dtl_entry *buf_end;
	u8	saved_dtl_mask;
};

static DEFINE_PER_CPU(struct dtl_ring, dtl_rings);

static atomic_t dtl_count;
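
/*
 * With CONFIG_VIRT_CPU_ACCOUNTING the accounting code owns the
 * hypervisor-visible dispatch log, so we cannot register our own
 * buffer; instead we hook dtl_consumer and copy each entry into a
 * local ring. write_ptr doubles as the enable flag: consume_dtle()
 * bails out while it is NULL.
 */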

/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	if (!wp)
		return;

	*wp = *dtle;
	barrier();
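
	/*
	 * The copy above must complete before we re-read vpa->dtl_idx:
	 * if the hypervisor has since wrapped past this entry, the check
	 * below discards our (possibly torn) copy.
	 */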
	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < vpa->dtl_idx)
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_ptr = wp;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();
	++dtlr->write_index;
}

static int dtl_start(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->buf = dtl->buf;
	dtlr->buf_end = dtl->buf + dtl->buf_entries;
	dtlr->write_index = 0;

	/* setting write_ptr enables logging into our buffer */
	smp_wmb();
	dtlr->write_ptr = dtl->buf;

	/* enable event logging */
	dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask;
	lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask;

	dtl_consumer = consume_dtle;
	atomic_inc(&dtl_count);
	return 0;
}
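
/*
 * Tear-down mirrors dtl_start(): write_ptr is cleared first so that
 * consume_dtle() stops logging into the buffer before we drop our
 * remaining references to it.
 */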
static void dtl_stop(struct dtl *dtl)
{
	struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu);

	dtlr->write_ptr = NULL;
	smp_wmb();

	dtlr->buf = NULL;

	/* restore dtl_enable_mask */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask;

	if (atomic_dec_and_test(&dtl_count))
		dtl_consumer = NULL;
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return per_cpu(dtl_rings, dtl->cpu).write_index;
}

#else /* CONFIG_VIRT_CPU_ACCOUNTING */
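
/*
 * Without CONFIG_VIRT_CPU_ACCOUNTING the dispatch log has no other
 * consumer, so we can register our buffer directly with the hypervisor
 * and let it write the entries in place; the lppaca's dtl_idx is then
 * the authoritative write index.
 */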

static int dtl_start(struct dtl *dtl)
{
	unsigned long addr;
	int ret, hwcpu;

	/* Register our dtl buffer with the hypervisor. The HV expects the
	 * buffer size to be passed in the second word of the buffer */
	((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;

	hwcpu = get_hard_smp_processor_id(dtl->cpu);
	addr = __pa(dtl->buf);
	ret = register_dtl(hwcpu, addr);
	if (ret) {
		printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) "
		       "failed with %d\n", __func__, dtl->cpu, hwcpu, ret);
		return -EIO;
	}

	/* set our initial buffer indices */
	lppaca_of(dtl->cpu).dtl_idx = 0;

	/* ensure that our updates to the lppaca fields have occurred before
	 * we actually enable the logging */
	smp_wmb();

	/* enable event logging */
	lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask;

	return 0;
}

static void dtl_stop(struct dtl *dtl)
{
	int hwcpu = get_hard_smp_processor_id(dtl->cpu);

	lppaca_of(dtl->cpu).dtl_enable_mask = 0x0;

	unregister_dtl(hwcpu);
}

static u64 dtl_current_index(struct dtl *dtl)
{
	return lppaca_of(dtl->cpu).dtl_idx;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */

static int dtl_enable(struct dtl *dtl)
{
	long int n_entries;
	long int rc;
	struct dtl_entry *buf = NULL;

	if (!dtl_cache)
		return -ENOMEM;

	/* only allow one reader */
	if (dtl->buf)
		return -EBUSY;

	n_entries = dtl_buf_entries;
	buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu));
	if (!buf) {
		printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
				__func__, dtl->cpu);
		return -ENOMEM;
	}
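
	/*
	 * The allocation above was done without dtl->lock held; re-check
	 * dtl->buf under the lock in case another opener raced us here.
	 */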
	spin_lock(&dtl->lock);
	rc = -EBUSY;
	if (!dtl->buf) {
		/* store the original allocation size for use during read */
		dtl->buf_entries = n_entries;
		dtl->buf = buf;
		dtl->last_idx = 0;
		rc = dtl_start(dtl);
		if (rc)
			dtl->buf = NULL;
	}
	spin_unlock(&dtl->lock);

	if (rc)
		kmem_cache_free(dtl_cache, buf);
	return rc;
}

static void dtl_disable(struct dtl *dtl)
{
	spin_lock(&dtl->lock);
	dtl_stop(dtl);
	kmem_cache_free(dtl_cache, dtl->buf);
	dtl->buf = NULL;
	dtl->buf_entries = 0;
	spin_unlock(&dtl->lock);
}

/* file interface */

static int dtl_file_open(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	int rc;

	rc = dtl_enable(dtl);
	if (rc)
		return rc;

	filp->private_data = dtl;
	return 0;
}

static int dtl_file_release(struct inode *inode, struct file *filp)
{
	struct dtl *dtl = inode->i_private;
	dtl_disable(dtl);
	return 0;
}

static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
		loff_t *pos)
{
	long int rc, n_read, n_req, read_size;
	struct dtl *dtl;
	u64 cur_idx, last_idx, i;

	if ((len % sizeof(struct dtl_entry)) != 0)
		return -EINVAL;

	dtl = filp->private_data;

	/* requested number of entries to read */
	n_req = len / sizeof(struct dtl_entry);

	/* actual number of entries read */
	n_read = 0;

	spin_lock(&dtl->lock);

	cur_idx = dtl_current_index(dtl);
	last_idx = dtl->last_idx;
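
	/*
	 * If the writer has lapped us, the oldest entry still present in
	 * the ring is the one just after (cur_idx - buf_entries); skip
	 * forward and drop whatever was overwritten.
	 */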
	if (last_idx + dtl->buf_entries <= cur_idx)
		last_idx = cur_idx - dtl->buf_entries + 1;

	if (last_idx + n_req > cur_idx)
		n_req = cur_idx - last_idx;

	if (n_req > 0)
		dtl->last_idx = last_idx + n_req;

	spin_unlock(&dtl->lock);

	if (n_req <= 0)
		return 0;
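
	/* ring offset of the oldest entry we are about to copy out */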
	i = last_idx % dtl->buf_entries;

	/* read the tail of the buffer if we've wrapped */
	if (i + n_req > dtl->buf_entries) {
		read_size = dtl->buf_entries - i;

		rc = copy_to_user(buf, &dtl->buf[i],
				read_size * sizeof(struct dtl_entry));
		if (rc)
			return -EFAULT;

		i = 0;
		n_req -= read_size;
		n_read += read_size;
		buf += read_size * sizeof(struct dtl_entry);
	}

	/* .. and now the head */
	rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry));
	if (rc)
		return -EFAULT;

	n_read += n_req;

	return n_read * sizeof(struct dtl_entry);
}

static const struct file_operations dtl_fops = {
	.open		= dtl_file_open,
	.release	= dtl_file_release,
	.read		= dtl_file_read,
	.llseek		= no_llseek,
};

static struct dentry *dtl_dir;

static int dtl_setup_file(struct dtl *dtl)
{
	char name[10];

	sprintf(name, "cpu-%d", dtl->cpu);

	dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops);
	if (!dtl->file)
		return -ENOMEM;

	return 0;
}

static int dtl_init(void)
{
	struct dentry *event_mask_file, *buf_entries_file;
	int rc, i;
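
	/* the dispatch trace log is only provided on shared-processor LPAR firmware */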
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	/* set up common debugfs structure */

	rc = -ENOMEM;
	dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root);
	if (!dtl_dir) {
		printk(KERN_WARNING "%s: can't create dtl root dir\n",
				__func__);
		goto err;
	}

	event_mask_file = debugfs_create_x8("dtl_event_mask", 0600,
				dtl_dir, &dtl_event_mask);
	buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400,
				dtl_dir, &dtl_buf_entries);

	if (!event_mask_file || !buf_entries_file) {
		printk(KERN_WARNING "%s: can't create dtl files\n", __func__);
		goto err_remove_dir;
	}

	/* set up the per-cpu log structures */
	for_each_possible_cpu(i) {
		struct dtl *dtl = &per_cpu(cpu_dtl, i);
		spin_lock_init(&dtl->lock);
		dtl->cpu = i;

		rc = dtl_setup_file(dtl);
		if (rc)
			goto err_remove_dir;
	}

	return 0;

err_remove_dir:
	debugfs_remove_recursive(dtl_dir);
err:
	return rc;
}
arch_initcall(dtl_init);