/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Proc Implementation.
 */
#include <sys/systeminfo.h>
#include <sys/kstat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <sys/proc.h>
#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/version.h>
#include "zfs_gitrev.h"
#if defined(CONSTIFY_PLUGIN) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
typedef struct ctl_table __no_const spl_ctl_table;
#else
typedef struct ctl_table spl_ctl_table;
#endif
static unsigned long table_min = 0;
static unsigned long table_max = ~0;

static struct ctl_table_header *spl_header = NULL;
#ifndef HAVE_REGISTER_SYSCTL_TABLE
static struct ctl_table_header *spl_kmem = NULL;
static struct ctl_table_header *spl_kstat = NULL;
#endif
static struct proc_dir_entry *proc_spl = NULL;
static struct proc_dir_entry *proc_spl_kmem = NULL;
static struct proc_dir_entry *proc_spl_kmem_slab = NULL;
static struct proc_dir_entry *proc_spl_taskq_all = NULL;
static struct proc_dir_entry *proc_spl_taskq = NULL;
struct proc_dir_entry *proc_spl_kstat = NULL;
#ifdef DEBUG_KMEM
static int
proc_domemused(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long val;
	spl_ctl_table dummy = *table;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &table_min;
	dummy.extra2 = &table_max;

	if (write) {
		*ppos += *lenp;
	} else {
#ifdef HAVE_ATOMIC64_T
		val = atomic64_read((atomic64_t *)table->data);
#else
		val = atomic_read((atomic_t *)table->data);
#endif /* HAVE_ATOMIC64_T */

		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
#endif /* DEBUG_KMEM */
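/*
 * Note the pattern shared by proc_domemused() above and proc_doslab()
 * below: a stack copy of the ctl_table ("dummy") is pointed at a local
 * unsigned long so the stock proc_doulongvec_minmax() helper can handle
 * the user-space formatting, while the real table->data (an atomic
 * counter here, a KMC_* flag mask below) is decoded by hand.  An
 * illustrative read of the DEBUG_KMEM-only node (values vary):
 *
 *	$ cat /proc/sys/kernel/spl/kmem/kmem_used
 *	8388608
 */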
static int
proc_doslab(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = 0;
	unsigned long val = 0, mask;
	spl_ctl_table dummy = *table;
	spl_kmem_cache_t *skc = NULL;

	dummy.data = &val;
	dummy.proc_handler = &proc_dointvec;
	dummy.extra1 = &table_min;
	dummy.extra2 = &table_max;

	if (write) {
		*ppos += *lenp;
	} else {
		down_read(&spl_kmem_cache_sem);
		mask = (unsigned long)table->data;

		list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {

			/* Only use slabs of the correct kmem/vmem type */
			if (!(skc->skc_flags & mask))
				continue;

			/* Sum the specified field for selected slabs */
			switch (mask & (KMC_TOTAL | KMC_ALLOC | KMC_MAX)) {
			case KMC_TOTAL:
				val += skc->skc_slab_size * skc->skc_slab_total;
				break;
			case KMC_ALLOC:
				val += skc->skc_obj_size * skc->skc_obj_alloc;
				break;
			case KMC_MAX:
				val += skc->skc_obj_size * skc->skc_obj_max;
				break;
			}
		}

		up_read(&spl_kmem_cache_sem);
		rc = proc_doulongvec_minmax(&dummy, write, buffer, lenp, ppos);
	}

	return (rc);
}
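/*
 * Illustrative use of the slab_kvmem_* nodes backed by proc_doslab():
 * each read walks every cache on spl_kmem_cache_list and sums one
 * statistic for the caches whose skc_flags match the mask stored in
 * table->data (output value varies by system):
 *
 *	$ cat /proc/sys/kernel/spl/kmem/slab_kvmem_total
 *	16777216
 */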
static int
proc_dohostid(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char *end, str[32];
	unsigned long hid;
	spl_ctl_table dummy = *table;

	dummy.data = str;
	dummy.maxlen = sizeof (str) - 1;

	if (!write)
		snprintf(str, sizeof (str), "%lx",
		    (unsigned long) zone_get_hostid(NULL));

	/* always returns 0 */
	proc_dostring(&dummy, write, buffer, lenp, ppos);

	if (write) {
		/*
		 * We can't use proc_doulongvec_minmax() in the write
		 * case here because hostid, while a hex value, has no
		 * leading 0x, which confuses the helper function.
		 */
		hid = simple_strtoul(str, &end, 16);
		if (str == end)
			return (-EINVAL);
		spl_hostid = hid;
	}

	return (0);
}
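/*
 * Example hostid usage; the value is read and written as bare hex with
 * no leading "0x", and a write containing no parsable hex digits fails
 * with -EINVAL:
 *
 *	# echo 1a2b3c4d > /proc/sys/kernel/spl/hostid
 *	# cat /proc/sys/kernel/spl/hostid
 *	1a2b3c4d
 */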
static void
taskq_seq_show_headers(struct seq_file *f)
{
	seq_printf(f, "%-25s %5s %5s %5s %5s %5s %5s %12s %5s %10s\n",
	    "taskq", "act", "nthr", "spwn", "maxt", "pri",
	    "mina", "maxa", "cura", "flags");
}
/* indices into the lheads array below */
#define	LHEAD_PEND	0
#define	LHEAD_PRIO	1
#define	LHEAD_DELAY	2
#define	LHEAD_WAIT	3
#define	LHEAD_ACTIVE	4
#define	LHEAD_SIZE	5

static unsigned int spl_max_show_tasks = 512;
/* CSTYLED */
module_param(spl_max_show_tasks, uint, 0644);
MODULE_PARM_DESC(spl_max_show_tasks, "Max number of tasks shown in taskq proc");
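/*
 * Because the parameter is declared with mode 0644 it can also be tuned
 * at runtime through sysfs, e.g.:
 *
 *	# echo 64 > /sys/module/spl/parameters/spl_max_show_tasks
 *
 * A value of 0 disables the per-list truncation entirely.
 */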
static int
taskq_seq_show_impl(struct seq_file *f, void *p, boolean_t allflag)
{
	taskq_t *tq = p;
	taskq_thread_t *tqt = NULL;
	spl_wait_queue_entry_t *wq;
	struct task_struct *tsk;
	taskq_ent_t *tqe;
	char name[100];
	struct list_head *lheads[LHEAD_SIZE], *lh;
	static char *list_names[LHEAD_SIZE] =
	    {"pend", "prio", "delay", "wait", "active" };
	int i, j, have_lheads = 0;
	unsigned long wflags, flags;

	spin_lock_irqsave_nested(&tq->tq_lock, flags, tq->tq_lock_class);
	spin_lock_irqsave(&tq->tq_wait_waitq.lock, wflags);

	/* get the various lists and check whether they're empty */
	lheads[LHEAD_PEND] = &tq->tq_pend_list;
	lheads[LHEAD_PRIO] = &tq->tq_prio_list;
	lheads[LHEAD_DELAY] = &tq->tq_delay_list;
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.head;
#else
	lheads[LHEAD_WAIT] = &tq->tq_wait_waitq.task_list;
#endif
	lheads[LHEAD_ACTIVE] = &tq->tq_active_list;

	for (i = 0; i < LHEAD_SIZE; ++i) {
		if (list_empty(lheads[i]))
			lheads[i] = NULL;
		else
			++have_lheads;
	}

	/* early return in non-"all" mode if lists are all empty */
	if (!allflag && !have_lheads) {
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
		spin_unlock_irqrestore(&tq->tq_lock, flags);
		return (0);
	}

	/* unlock the waitq quickly */
	if (!lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);

	/* show the base taskq contents */
	snprintf(name, sizeof (name), "%s/%d", tq->tq_name, tq->tq_instance);
	seq_printf(f, "%-25s ", name);
	seq_printf(f, "%5d %5d %5d %5d %5d %5d %12d %5d %10x\n",
	    tq->tq_nactive, tq->tq_nthreads, tq->tq_nspawn,
	    tq->tq_maxthreads, tq->tq_pri, tq->tq_minalloc, tq->tq_maxalloc,
	    tq->tq_nalloc, tq->tq_flags);

	/* show the active list */
	if (lheads[LHEAD_ACTIVE]) {
		j = 0;
		list_for_each_entry(tqt, &tq->tq_active_list, tqt_active_list) {
			if (j == 0)
				seq_printf(f, "\t%s:",
				    list_names[LHEAD_ACTIVE]);
			else if (j == 2) {
				seq_printf(f, "\n\t       ");
				j = 0;
			}
			seq_printf(f, " [%d]%pf(%ps)",
			    tqt->tqt_thread->pid,
			    tqt->tqt_task->tqent_func,
			    tqt->tqt_task->tqent_arg);
			++j;
		}
		seq_printf(f, "\n");
	}

	for (i = LHEAD_PEND; i <= LHEAD_WAIT; ++i)
		if (lheads[i]) {
			j = 0;
			list_for_each(lh, lheads[i]) {
				if (spl_max_show_tasks != 0 &&
				    j >= spl_max_show_tasks) {
					seq_printf(f, "\n\t(truncated)");
					break;
				}
				/* show the wait waitq list */
				if (i == LHEAD_WAIT) {
#ifdef HAVE_WAIT_QUEUE_HEAD_ENTRY
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, entry);
#else
					wq = list_entry(lh,
					    spl_wait_queue_entry_t, task_list);
#endif
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 8 == 0)
						seq_printf(f, "\n\t     ");

					tsk = wq->private;
					seq_printf(f, " %d", tsk->pid);
				/* pend, prio and delay lists */
				} else {
					tqe = list_entry(lh, taskq_ent_t,
					    tqent_list);
					if (j == 0)
						seq_printf(f, "\t%s:",
						    list_names[i]);
					else if (j % 2 == 0)
						seq_printf(f, "\n\t     ");

					seq_printf(f, " %pf(%ps)",
					    tqe->tqent_func,
					    tqe->tqent_arg);
				}
				++j;
			}
			seq_printf(f, "\n");
		}
	if (lheads[LHEAD_WAIT])
		spin_unlock_irqrestore(&tq->tq_wait_waitq.lock, wflags);
	spin_unlock_irqrestore(&tq->tq_lock, flags);

	return (0);
}
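/*
 * The lines emitted above combine into output of roughly this shape
 * (illustrative only; real names, counts and pointers vary):
 *
 *	taskq                       act  nthr  spwn  maxt   pri ...
 *	spl_system_taskq/0            0     8     0    64   100 ...
 *		pend: zio_execute(0xffff93a9c6d32f40)
 *
 * Pend/prio/delay entries print as func(arg), wait entries as PIDs,
 * and active entries as [PID]func(arg).
 */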
static int
taskq_all_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_TRUE));
}

static int
taskq_seq_show(struct seq_file *f, void *p)
{
	return (taskq_seq_show_impl(f, p, B_FALSE));
}
static void *
taskq_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&tq_list_sem);
	if (!n)
		taskq_seq_show_headers(f);

	p = tq_list.next;
	while (n--) {
		p = p->next;
		if (p == &tq_list)
			return (NULL);
	}

	return (list_entry(p, taskq_t, tq_taskqs));
}
static void *
taskq_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	taskq_t *tq = p;

	++*pos;
	return ((tq->tq_taskqs.next == &tq_list) ?
	    NULL : list_entry(tq->tq_taskqs.next, taskq_t, tq_taskqs));
}
static void
slab_seq_show_headers(struct seq_file *f)
{
	seq_printf(f,
	    "--------------------- cache ----------"
	    "---------------------------------------------  "
	    "----- slab ------  "
	    "---- object -----  "
	    "--- emergency ---\n");
	seq_printf(f,
	    "name                                  "
	    "  flags      size     alloc slabsize  objsize  "
	    "total alloc   max  "
	    "total alloc   max  "
	    "dlock alloc   max\n");
}
static int
slab_seq_show(struct seq_file *f, void *p)
{
	spl_kmem_cache_t *skc = p;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	if (skc->skc_flags & KMC_SLAB) {
		/*
		 * This cache is backed by a generic Linux kmem cache which
		 * has its own accounting. For these caches we only track
		 * the number of active allocated objects that exist within
		 * the underlying Linux slabs. For the overall statistics of
		 * the underlying Linux cache please refer to /proc/slabinfo.
		 */
		spin_lock(&skc->skc_lock);
		uint64_t objs_allocated =
		    percpu_counter_sum(&skc->skc_linux_alloc);
		seq_printf(f, "%-36s  ", skc->skc_name);
		seq_printf(f, "0x%05lx %9s %9lu %8s %8u  "
		    "%5s %5s %5s  %5s %5lu %5s  %5s %5s %5s\n",
		    (long unsigned)skc->skc_flags,
		    "-",
		    (long unsigned)(skc->skc_obj_size * objs_allocated),
		    "-",
		    (unsigned)skc->skc_obj_size,
		    "-", "-", "-", "-",
		    (long unsigned)objs_allocated,
		    "-", "-", "-", "-");
		spin_unlock(&skc->skc_lock);
		return (0);
	}

	spin_lock(&skc->skc_lock);
	seq_printf(f, "%-36s  ", skc->skc_name);
	seq_printf(f, "0x%05lx %9lu %9lu %8u %8u  "
	    "%5lu %5lu %5lu  %5lu %5lu %5lu  %5lu %5lu %5lu\n",
	    (long unsigned)skc->skc_flags,
	    (long unsigned)(skc->skc_slab_size * skc->skc_slab_total),
	    (long unsigned)(skc->skc_obj_size * skc->skc_obj_alloc),
	    (unsigned)skc->skc_slab_size,
	    (unsigned)skc->skc_obj_size,
	    (long unsigned)skc->skc_slab_total,
	    (long unsigned)skc->skc_slab_alloc,
	    (long unsigned)skc->skc_slab_max,
	    (long unsigned)skc->skc_obj_total,
	    (long unsigned)skc->skc_obj_alloc,
	    (long unsigned)skc->skc_obj_max,
	    (long unsigned)skc->skc_obj_deadlock,
	    (long unsigned)skc->skc_obj_emergency,
	    (long unsigned)skc->skc_obj_emergency_max);
	spin_unlock(&skc->skc_lock);
	return (0);
}
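/*
 * Reading /proc/spl/kmem/slab produces one row per cache under the
 * headers above.  Caches flagged KMC_SLAB are backed by generic Linux
 * slabs, so most columns print as "-" and only the object size and
 * allocated-object figures are meaningful; /proc/slabinfo has the rest.
 */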
static void *
slab_seq_start(struct seq_file *f, loff_t *pos)
{
	struct list_head *p;
	loff_t n = *pos;

	down_read(&spl_kmem_cache_sem);
	if (!n)
		slab_seq_show_headers(f);

	p = spl_kmem_cache_list.next;
	while (n--) {
		p = p->next;
		if (p == &spl_kmem_cache_list)
			return (NULL);
	}

	return (list_entry(p, spl_kmem_cache_t, skc_list));
}
static void *
slab_seq_next(struct seq_file *f, void *p, loff_t *pos)
{
	spl_kmem_cache_t *skc = p;

	++*pos;
	return ((skc->skc_list.next == &spl_kmem_cache_list) ?
	    NULL : list_entry(skc->skc_list.next, spl_kmem_cache_t, skc_list));
}
static void
slab_seq_stop(struct seq_file *f, void *v)
{
	up_read(&spl_kmem_cache_sem);
}
static const struct seq_operations slab_seq_ops = {
	.show  = slab_seq_show,
	.start = slab_seq_start,
	.next  = slab_seq_next,
	.stop  = slab_seq_stop,
};
static int
proc_slab_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &slab_seq_ops));
}
static const kstat_proc_op_t proc_slab_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open = proc_slab_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = seq_release,
#else
	.open = proc_slab_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
#endif
};
static void
taskq_seq_stop(struct seq_file *f, void *v)
{
	up_read(&tq_list_sem);
}
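/*
 * The seq_file iterators above follow the usual kernel contract:
 * ->start acquires the relevant list semaphore and seeks to *pos,
 * ->next advances one element, and ->stop drops the semaphore, so the
 * list is held read-locked for the duration of a read of the file.
 */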
static const struct seq_operations taskq_all_seq_ops = {
	.show  = taskq_all_seq_show,
	.start = taskq_seq_start,
	.next  = taskq_seq_next,
	.stop  = taskq_seq_stop,
};

static const struct seq_operations taskq_seq_ops = {
	.show  = taskq_seq_show,
	.start = taskq_seq_start,
	.next  = taskq_seq_next,
	.stop  = taskq_seq_stop,
};
static int
proc_taskq_all_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_all_seq_ops));
}

static int
proc_taskq_open(struct inode *inode, struct file *filp)
{
	return (seq_open(filp, &taskq_seq_ops));
}
static const kstat_proc_op_t proc_taskq_all_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open = proc_taskq_all_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = seq_release,
#else
	.open = proc_taskq_all_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
#endif
};

static const kstat_proc_op_t proc_taskq_operations = {
#ifdef HAVE_PROC_OPS_STRUCT
	.proc_open = proc_taskq_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = seq_release,
#else
	.open = proc_taskq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
#endif
};
static struct ctl_table spl_kmem_table[] = {
#ifdef DEBUG_KMEM
	{
		.procname	= "kmem_used",
		.data		= &kmem_alloc_used,
#ifdef HAVE_ATOMIC64_T
		.maxlen		= sizeof (atomic64_t),
#else
		.maxlen		= sizeof (atomic_t),
#endif /* HAVE_ATOMIC64_T */
		.mode		= 0444,
		.proc_handler	= &proc_domemused,
	},
	{
		.procname	= "kmem_max",
		.data		= &kmem_alloc_max,
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doulongvec_minmax,
	},
#endif /* DEBUG_KMEM */
	{
		.procname	= "slab_kvmem_total",
		.data		= (void *)(KMC_KVMEM | KMC_TOTAL),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{
		.procname	= "slab_kvmem_alloc",
		.data		= (void *)(KMC_KVMEM | KMC_ALLOC),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{
		.procname	= "slab_kvmem_max",
		.data		= (void *)(KMC_KVMEM | KMC_MAX),
		.maxlen		= sizeof (unsigned long),
		.extra1		= &table_min,
		.extra2		= &table_max,
		.mode		= 0444,
		.proc_handler	= &proc_doslab,
	},
	{},
};
static struct ctl_table spl_kstat_table[] = {
	{},
};
static struct ctl_table spl_table[] = {
	/*
	 * NB No .strategy entries have been provided since
	 * sysctl(8) prefers to go via /proc for portability.
	 */
	{
		.procname	= "gitrev",
		.data		= (char *)ZFS_META_GITREV,
		.maxlen		= sizeof (ZFS_META_GITREV),
		.mode		= 0444,
		.proc_handler	= &proc_dostring,
	},
	{
		.procname	= "hostid",
		.data		= &spl_hostid,
		.maxlen		= sizeof (unsigned long),
		.mode		= 0644,
		.proc_handler	= &proc_dohostid,
	},
#ifdef HAVE_REGISTER_SYSCTL_TABLE
	{
		.procname	= "kmem",
		.mode		= 0555,
		.child		= spl_kmem_table,
	},
	{
		.procname	= "kstat",
		.mode		= 0555,
		.child		= spl_kstat_table,
	},
#endif
	{},
};
#ifdef HAVE_REGISTER_SYSCTL_TABLE
static struct ctl_table spl_dir[] = {
	{
		.procname	= "spl",
		.mode		= 0555,
		.child		= spl_table,
	},
	{},
};

static struct ctl_table spl_root[] = {
	{
		.procname	= "kernel",
		.mode		= 0555,
		.child		= spl_dir,
	},
	{},
};
#endif
static void spl_proc_cleanup(void)
{
	remove_proc_entry("kstat", proc_spl);
	remove_proc_entry("slab", proc_spl_kmem);
	remove_proc_entry("kmem", proc_spl);
	remove_proc_entry("taskq-all", proc_spl);
	remove_proc_entry("taskq", proc_spl);
	remove_proc_entry("spl", NULL);

#ifndef HAVE_REGISTER_SYSCTL_TABLE
	if (spl_kstat) {
		unregister_sysctl_table(spl_kstat);
		spl_kstat = NULL;
	}
	if (spl_kmem) {
		unregister_sysctl_table(spl_kmem);
		spl_kmem = NULL;
	}
#endif
	if (spl_header) {
		unregister_sysctl_table(spl_header);
		spl_header = NULL;
	}
}
int
spl_proc_init(void)
{
	int rc = 0;

#ifdef HAVE_REGISTER_SYSCTL_TABLE
	spl_header = register_sysctl_table(spl_root);
	if (spl_header == NULL)
		return (-EUNATCH);
#else
	spl_header = register_sysctl("kernel/spl", spl_table);
	if (spl_header == NULL)
		return (-EUNATCH);

	spl_kmem = register_sysctl("kernel/spl/kmem", spl_kmem_table);
	if (spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}
	spl_kstat = register_sysctl("kernel/spl/kstat", spl_kstat_table);
	if (spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
#endif

	proc_spl = proc_mkdir("spl", NULL);
	if (proc_spl == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq_all = proc_create_data("taskq-all", 0444, proc_spl,
	    &proc_taskq_all_operations, NULL);
	if (proc_spl_taskq_all == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_taskq = proc_create_data("taskq", 0444, proc_spl,
	    &proc_taskq_operations, NULL);
	if (proc_spl_taskq == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem = proc_mkdir("kmem", proc_spl);
	if (proc_spl_kmem == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kmem_slab = proc_create_data("slab", 0444, proc_spl_kmem,
	    &proc_slab_operations, NULL);
	if (proc_spl_kmem_slab == NULL) {
		rc = -EUNATCH;
		goto out;
	}

	proc_spl_kstat = proc_mkdir("kstat", proc_spl);
	if (proc_spl_kstat == NULL) {
		rc = -EUNATCH;
		goto out;
	}
out:
	if (rc)
		spl_proc_cleanup();

	return (rc);
}
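/*
 * On success spl_proc_init() leaves the following hierarchy behind:
 *
 *	/proc/spl/taskq-all	all taskqs, including idle ones
 *	/proc/spl/taskq		only taskqs with pending/active entries
 *	/proc/spl/kmem/slab	SPL kmem cache statistics
 *	/proc/spl/kstat/	directory populated by the kstat code
 *	/proc/sys/kernel/spl/*	the sysctl tables registered above
 */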
void
spl_proc_fini(void)
{
	spl_proc_cleanup();
}