/*
 *  arch/s390/mm/cmm.c
 *
 *  S390 version
 *    Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Collaborative memory management interface.
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>

#include <asm/pgalloc.h>
#include <asm/uaccess.h>

static char *sender = "VMRMSVM";
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
		 "Guest name that may send SMSG messages (default VMRMSVM)");

#include "../../../drivers/s390/net/smsgiucv.h"

#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)

struct cmm_page_array {
	struct cmm_page_array *next;
	unsigned long index;
	unsigned long pages[CMM_NR_PAGES];
};

static long cmm_pages = 0;
static long cmm_timed_pages = 0;
static volatile long cmm_pages_target = 0;
static volatile long cmm_timed_pages_target = 0;
static long cmm_timeout_pages = 0;
static long cmm_timeout_seconds = 0;

static struct cmm_page_array *cmm_page_list = NULL;
static struct cmm_page_array *cmm_timed_page_list = NULL;

static unsigned long cmm_thread_active = 0;
static struct work_struct cmm_thread_starter;
static wait_queue_head_t cmm_thread_wait;
static struct timer_list cmm_timer;

static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);

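/*
 * Allocate up to "pages" free pages, hand each one to the hypervisor
 * with diag10 (diagnose 0x10, release pages) and record it in the given
 * page array list. Returns the number of pages that could not be
 * allocated.
 */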
static long
cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list)
{
	struct cmm_page_array *pa;
	unsigned long page;

	pa = *list;
	while (pages) {
		page = __get_free_page(GFP_NOIO);
		if (!page)
			break;
		if (!pa || pa->index >= CMM_NR_PAGES) {
			/* Need a new page for the page list. */
			pa = (struct cmm_page_array *)
				__get_free_page(GFP_NOIO);
			if (!pa) {
				free_page(page);
				break;
			}
			pa->next = *list;
			pa->index = 0;
			*list = pa;
		}
		diag10(page);
		pa->pages[pa->index++] = page;
		(*counter)++;
		pages--;
	}
	return pages;
}

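/*
 * Take "pages" pages off the page array list and give them back to the
 * kernel, freeing page-list pages as they become empty.
 */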
static void
cmm_free_pages(long pages, long *counter, struct cmm_page_array **list)
{
	struct cmm_page_array *pa;
	unsigned long page;

	pa = *list;
	while (pages) {
		if (!pa || pa->index <= 0)
			break;
		page = pa->pages[--pa->index];
		if (pa->index == 0) {
			pa = pa->next;
			free_page((unsigned long) *list);
			*list = pa;
		}
		free_page(page);
		(*counter)--;
		pages--;
	}
}

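/*
 * Worker thread: adjusts cmm_pages and cmm_timed_pages one page at a
 * time until they match their targets, then sleeps until it is kicked
 * again. Exits when it receives a kill signal.
 */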
static int
cmm_thread(void *dummy)
{
	int rc;

	daemonize("cmmthread");
	while (1) {
		rc = wait_event_interruptible(cmm_thread_wait,
			(cmm_pages != cmm_pages_target ||
			 cmm_timed_pages != cmm_timed_pages_target));
		if (rc == -ERESTARTSYS) {
			/* Got kill signal. End thread. */
			clear_bit(0, &cmm_thread_active);
			cmm_pages_target = cmm_pages;
			cmm_timed_pages_target = cmm_timed_pages;
			break;
		}
		if (cmm_pages_target > cmm_pages) {
			if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
				cmm_pages_target = cmm_pages;
		} else if (cmm_pages_target < cmm_pages) {
			cmm_free_pages(1, &cmm_pages, &cmm_page_list);
		}
		if (cmm_timed_pages_target > cmm_timed_pages) {
			if (cmm_alloc_pages(1, &cmm_timed_pages,
					    &cmm_timed_page_list))
				cmm_timed_pages_target = cmm_timed_pages;
		} else if (cmm_timed_pages_target < cmm_timed_pages) {
			cmm_free_pages(1, &cmm_timed_pages,
				       &cmm_timed_page_list);
		}
		if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
			cmm_set_timer();
	}
	return 0;
}

static void
cmm_start_thread(void)
{
	kernel_thread(cmm_thread, NULL, 0);
}

static void
cmm_kick_thread(void)
{
	if (!test_and_set_bit(0, &cmm_thread_active))
		schedule_work(&cmm_thread_starter);
	wake_up(&cmm_thread_wait);
}

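/*
 * (Re)arm the release timer for the timed pages, or delete it if there
 * is nothing left to release or no timeout is configured.
 */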
static void
cmm_set_timer(void)
{
	if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
		if (timer_pending(&cmm_timer))
			del_timer(&cmm_timer);
		return;
	}
	if (timer_pending(&cmm_timer)) {
		if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
			return;
	}
	cmm_timer.function = cmm_timer_fn;
	cmm_timer.data = 0;
	cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
	add_timer(&cmm_timer);
}

static void
cmm_timer_fn(unsigned long ignored)
{
	long pages;

	pages = cmm_timed_pages_target - cmm_timeout_pages;
	if (pages < 0)
		cmm_timed_pages_target = 0;
	else
		cmm_timed_pages_target = pages;
	cmm_kick_thread();
	cmm_set_timer();
}

void
cmm_set_pages(long pages)
{
	cmm_pages_target = pages;
	cmm_kick_thread();
}

long
cmm_get_pages(void)
{
	return cmm_pages;
}

void
cmm_add_timed_pages(long pages)
{
	cmm_timed_pages_target += pages;
	cmm_kick_thread();
}

long
cmm_get_timed_pages(void)
{
	return cmm_timed_pages;
}

void
cmm_set_timeout(long pages, long seconds)
{
	cmm_timeout_pages = pages;
	cmm_timeout_seconds = seconds;
	cmm_set_timer();
}

static inline int
cmm_skip_blanks(char *cp, char **endp)
{
	char *str;

	for (str = cp; *str == ' ' || *str == '\t'; str++);
	*endp = str;
	return str != cp;
}

#ifdef CONFIG_CMM_PROC
/* These will someday get removed. */
#define VM_CMM_PAGES		1111
#define VM_CMM_TIMED_PAGES	1112
#define VM_CMM_TIMEOUT		1113

static struct ctl_table cmm_table[];

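/*
 * sysctl handler shared by vm.cmm_pages and vm.cmm_timed_pages: a write
 * to cmm_pages sets the page target, a write to cmm_timed_pages adds
 * timed pages; a read reports the current count.
 */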
static int
cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char buf[16], *p;
	long pages;
	int len;

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = *lenp;
		if (copy_from_user(buf, buffer,
				   len > sizeof(buf) ? sizeof(buf) : len))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';
		cmm_skip_blanks(buf, &p);
		pages = simple_strtoul(p, &p, 0);
		if (ctl == &cmm_table[0])
			cmm_set_pages(pages);
		else
			cmm_add_timed_pages(pages);
	} else {
		if (ctl == &cmm_table[0])
			pages = cmm_get_pages();
		else
			pages = cmm_get_timed_pages();
		len = sprintf(buf, "%ld\n", pages);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
	}
	*lenp = len;
	*ppos += len;
	return 0;
}

static int
cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
		    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char buf[64], *p;
	long pages, seconds;
	int len;

	if (!*lenp || (*ppos && !write)) {
		*lenp = 0;
		return 0;
	}

	if (write) {
		len = *lenp;
		if (copy_from_user(buf, buffer,
				   len > sizeof(buf) ? sizeof(buf) : len))
			return -EFAULT;
		buf[sizeof(buf) - 1] = '\0';
		cmm_skip_blanks(buf, &p);
		pages = simple_strtoul(p, &p, 0);
		cmm_skip_blanks(p, &p);
		seconds = simple_strtoul(p, &p, 0);
		cmm_set_timeout(pages, seconds);
	} else {
		len = sprintf(buf, "%ld %ld\n",
			      cmm_timeout_pages, cmm_timeout_seconds);
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
	}
	*lenp = len;
	*ppos += len;
	return 0;
}

static struct ctl_table cmm_table[] = {
	{
		.ctl_name	= VM_CMM_PAGES,
		.procname	= "cmm_pages",
		.mode		= 0644,
		.proc_handler	= &cmm_pages_handler,
	},
	{
		.ctl_name	= VM_CMM_TIMED_PAGES,
		.procname	= "cmm_timed_pages",
		.mode		= 0644,
		.proc_handler	= &cmm_pages_handler,
	},
	{
		.ctl_name	= VM_CMM_TIMEOUT,
		.procname	= "cmm_timeout",
		.mode		= 0644,
		.proc_handler	= &cmm_timeout_handler,
	},
	{ .ctl_name = 0 }
};

static struct ctl_table cmm_dir_table[] = {
	{
		.ctl_name	= CTL_VM,
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= cmm_table,
	},
	{ .ctl_name = 0 }
};
#endif

#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
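/*
 * SMSG callback: parse "CMM SHRINK <n>", "CMM RELEASE <n>" and
 * "CMM REUSE <n> <sec>" messages from the configured sender guest.
 */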
static void
cmm_smsg_target(char *from, char *msg)
{
	long pages, seconds;

	if (strlen(sender) > 0 && strcmp(from, sender) != 0)
		return;
	if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
		return;
	if (strncmp(msg, "SHRINK", 6) == 0) {
		if (!cmm_skip_blanks(msg + 6, &msg))
			return;
		pages = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_set_pages(pages);
	} else if (strncmp(msg, "RELEASE", 7) == 0) {
		if (!cmm_skip_blanks(msg + 7, &msg))
			return;
		pages = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_add_timed_pages(pages);
	} else if (strncmp(msg, "REUSE", 5) == 0) {
		if (!cmm_skip_blanks(msg + 5, &msg))
			return;
		pages = simple_strtoul(msg, &msg, 0);
		if (!cmm_skip_blanks(msg, &msg))
			return;
		seconds = simple_strtoul(msg, &msg, 0);
		cmm_skip_blanks(msg, &msg);
		if (*msg == '\0')
			cmm_set_timeout(pages, seconds);
	}
}
#endif

struct ctl_table_header *cmm_sysctl_header;

static int
cmm_init(void)
{
#ifdef CONFIG_CMM_PROC
	cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
#endif
#ifdef CONFIG_CMM_IUCV
	smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
	INIT_WORK(&cmm_thread_starter, (void *) cmm_start_thread, NULL);
	init_waitqueue_head(&cmm_thread_wait);
	init_timer(&cmm_timer);
	return 0;
}

static void
cmm_exit(void)
{
	cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
	cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
#ifdef CONFIG_CMM_PROC
	unregister_sysctl_table(cmm_sysctl_header);
#endif
#ifdef CONFIG_CMM_IUCV
	smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
}

module_init(cmm_init);
module_exit(cmm_exit);

EXPORT_SYMBOL(cmm_set_pages);
EXPORT_SYMBOL(cmm_get_pages);
EXPORT_SYMBOL(cmm_add_timed_pages);
EXPORT_SYMBOL(cmm_get_timed_pages);
EXPORT_SYMBOL(cmm_set_timeout);

MODULE_LICENSE("GPL");