arch/ppc64/kernel/ItLpQueue.c

/*
 * ItLpQueue.c
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/system.h>
#include <asm/paca.h>
#include <asm/iSeries/ItLpQueue.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/iSeries/HvCallEvent.h>

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition. This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
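
/*
 * Per-cpu counts of events seen, indexed by event type; they are
 * summed for the /proc/iSeries/lpevents file below.
 */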
DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

static char *event_types[HvLpEvent_Type_NumTypes] = {
	"Hypervisor",
	"Machine Facilities",
	"Session Manager",
	"SPD I/O",
	"Virtual Bus",
	"PCI I/O",
	"RIO I/O",
	"Virtual Lan",
	"Virtual I/O"
};

/* Array of LpEvent handler functions */
extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
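
/*
 * Drivers install their handlers with HvLpEvent_registerHandler() from
 * HvLpEvent.h. A minimal sketch (handler name hypothetical):
 *
 *	static void my_handler(struct HvLpEvent *event, struct pt_regs *regs)
 *	{
 *		... copy what is needed, then ACK if required ...
 *	}
 *
 *	HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo, &my_handler);
 */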

static struct HvLpEvent * get_next_hvlpevent(void)
{
	struct HvLpEvent * event;
	event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

	if (event->xFlags.xValid) {
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();
		/* Set pointer to next potential event */
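		/*
		 * Events occupy a whole number of LpEventAlign (64 byte)
		 * chunks, so e.g. xSizeMinus1 == 127 advances the cursor
		 * by ((127 + 64) / 64) * 64 == 128 bytes.
		 */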
		hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
				LpEventAlign) / LpEventAlign) * LpEventAlign;

		/* Wrap to beginning if no room at end */
		if (hvlpevent_queue.xSlicCurEventPtr >
				hvlpevent_queue.xSlicLastValidEventPtr) {
			hvlpevent_queue.xSlicCurEventPtr =
				hvlpevent_queue.xSlicEventStackPtr;
		}
	} else {
		event = NULL;
	}

	return event;
}
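
/*
 * By default every CPU may process events; the "spread_lpevents=" boot
 * parameter (handled below) restricts processing to the first N CPUs.
 */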
static unsigned long spread_lpevents = NR_CPUS;

int hvlpevent_is_pending(void)
{
	struct HvLpEvent *next_event;

	if (smp_processor_id() >= spread_lpevents)
		return 0;

	next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;

	return next_event->xFlags.xValid |
		hvlpevent_queue.xPlicOverflowIntPending;
}

static void hvlpevent_clear_valid(struct HvLpEvent * event)
{
	/* Tell the Hypervisor that we're done with this event.
	 * Also clear bits within this event that might look like valid bits.
	 * ie. on 64-byte boundaries.
	 */
	struct HvLpEvent *tmp;
	unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
			LpEventAlign) - 1;

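	/*
	 * The cases below fall through deliberately: e.g. a three-chunk
	 * event (xSizeMinus1 == 191) gives extra == 2, so case 2 and then
	 * case 1 clear the trailing chunks before the header chunk is
	 * cleared after the mb().
	 */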
	switch (extra) {
	case 3:
		tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
		tmp->xFlags.xValid = 0;
	case 2:
		tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
		tmp->xFlags.xValid = 0;
	case 1:
		tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
		tmp->xFlags.xValid = 0;
	}

	mb();

	event->xFlags.xValid = 0;
}

void process_hvlpevents(struct pt_regs *regs)
{
	struct HvLpEvent * event;

	/* If we have recursed, just return */
	if (!spin_trylock(&hvlpevent_queue.lock))
		return;

	for (;;) {
		event = get_next_hvlpevent();
		if (event) {
			/* Call appropriate handler here, passing
			 * a pointer to the LpEvent. The handler
			 * must make a copy of the LpEvent if it
			 * needs it in a bottom half. (perhaps for
			 * an ACK)
			 *
			 * Handlers are responsible for ACK processing
			 *
			 * The Hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so no type check is necessary
			 * here!
			 */
			if (event->xType < HvLpEvent_Type_NumTypes)
				__get_cpu_var(hvlpevent_counts)[event->xType]++;
			if (event->xType < HvLpEvent_Type_NumTypes &&
					lpEventHandler[event->xType])
				lpEventHandler[event->xType](event, regs);
			else
				printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType);

			hvlpevent_clear_valid(event);
		} else if (hvlpevent_queue.xPlicOverflowIntPending)
			/*
			 * No more valid events. If overflow events are
			 * pending process them
			 */
			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
		else
			break;
	}

	spin_unlock(&hvlpevent_queue.lock);
}

static int set_spread_lpevents(char *str)
{
	unsigned long val = simple_strtoul(str, NULL, 0);

	/*
	 * The parameter is the number of processors to share in processing
	 * lp events.
	 */
	if ((val > 0) && (val <= NR_CPUS)) {
		spread_lpevents = val;
		printk("lpevent processing spread over %ld processors\n", val);
	} else {
		printk("invalid spread_lpevents %ld\n", val);
	}

	return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);
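
/*
 * Example: booting with "spread_lpevents=4" confines LpEvent processing
 * to CPUs 0-3.
 */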

void setup_hvlpevent_queue(void)
{
	void *eventStack;

	/*
	 * Allocate a page for the Event Stack. The Hypervisor needs the
	 * absolute real address, so we subtract out the KERNELBASE and add
	 * in the absolute real address of the kernel load area.
	 */
	eventStack = alloc_bootmem_pages(LpEventStackSize);
	memset(eventStack, 0, LpEventStackSize);

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);

	hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
	hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
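	/*
	 * Leave room for one maximum-size event at the end: an event may
	 * start at xSlicLastValidEventPtr and still fit, which is what the
	 * wrap test in get_next_hvlpevent() relies on.
	 */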
	hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
		(LpEventStackSize - LpEventMaxSize);
	hvlpevent_queue.xIndex = 0;
}
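
/*
 * /proc/iSeries/lpevents: a grand total of events processed, a per-type
 * breakdown, then a per-CPU breakdown, all from the per-cpu
 * hvlpevent_counts above.
 */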
static int proc_lpevents_show(struct seq_file *m, void *v)
{
	int cpu, i;
	unsigned long sum;
	static unsigned long cpu_totals[NR_CPUS];

	/* FIXME: do we care that there's no locking here? */
	sum = 0;
	for_each_online_cpu(cpu) {
		cpu_totals[cpu] = 0;
		for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
			cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
		}
		sum += cpu_totals[cpu];
	}

	seq_printf(m, "LpEventQueue 0\n");
	seq_printf(m, "  events processed:\t%lu\n", sum);

	for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
		sum = 0;
		for_each_online_cpu(cpu) {
			sum += per_cpu(hvlpevent_counts, cpu)[i];
		}

		seq_printf(m, "    %-20s %10lu\n", event_types[i], sum);
	}

	seq_printf(m, "\n  events processed by processor:\n");

	for_each_online_cpu(cpu) {
		seq_printf(m, "    CPU%02d  %10lu\n", cpu, cpu_totals[cpu]);
	}

	return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_lpevents_show, NULL);
}

static struct file_operations proc_lpevents_operations = {
	.open		= proc_lpevents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init proc_lpevents_init(void)
{
	struct proc_dir_entry *e;

	e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
	if (e)
		e->proc_fops = &proc_lpevents_operations;

	return 0;
}
__initcall(proc_lpevents_init);