/*
 * Persistent Storage - platform driver interface parts.
 *
 * Copyright (C) 2007-2008 Google, Inc.
 * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define pr_fmt(fmt) "pstore: " fmt

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kmsg_dump.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pstore.h>
#if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
#include <linux/lzo.h>
#endif
#if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
#include <linux/lz4.h>
#endif
#include <linux/crypto.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

#include "internal.h"
/*
 * We defer making "oops" entries appear in pstore - see
 * whether the system is actually still running well enough
 * to let someone see the entry
 */
/*
 * Period (milliseconds) between runtime re-scans of the backend for new
 * records; -1 (the default) disables the periodic timer entirely.
 * Writable at runtime through /sys/module/pstore/parameters/update_ms.
 */
static int pstore_update_ms = -1;
module_param_named(update_ms, pstore_update_ms, int, 0600);
MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
		 "(default is -1, which means runtime updates are disabled; "
		 "enabling this option is not safe, it may lead to further "
		 "corruption on Oopses)");
/* Set by pstore_dump() when a new record was written; polled by the timer. */
static int	pstore_new_entry;

static void pstore_timefunc(struct timer_list *);
static DEFINE_TIMER(pstore_timer, pstore_timefunc);

static void pstore_dowork(struct work_struct *);
static DECLARE_WORK(pstore_work, pstore_dowork);

/*
 * pstore_lock just protects "psinfo" during
 * calls to pstore_register()
 */
static DEFINE_SPINLOCK(pstore_lock);
/* The single registered backend; NULL until pstore_register() succeeds. */
struct pstore_info *psinfo;
75 static char *compress
=
76 #ifdef CONFIG_PSTORE_COMPRESS_DEFAULT
77 CONFIG_PSTORE_COMPRESS_DEFAULT
;
82 /* Compression parameters */
83 static struct crypto_comp
*tfm
;
85 struct pstore_zbackend
{
86 int (*zbufsize
)(size_t size
);
/* Scratch buffer (and its size) used for compression and decompression. */
static char *big_oops_buf;
static size_t big_oops_buf_sz;

/* How much of the console log to snapshot */
unsigned long kmsg_bytes = PSTORE_DEFAULT_KMSG_BYTES;
96 void pstore_set_kmsg_bytes(int bytes
)
/* Tag each group of saved records with a sequence number */
static int	oopscount;
104 static const char *get_reason_str(enum kmsg_dump_reason reason
)
107 case KMSG_DUMP_PANIC
:
111 case KMSG_DUMP_EMERG
:
113 case KMSG_DUMP_RESTART
:
117 case KMSG_DUMP_POWEROFF
:
124 bool pstore_cannot_block_path(enum kmsg_dump_reason reason
)
127 * In case of NMI path, pstore shouldn't be blocked
128 * regardless of reason.
134 /* In panic case, other cpus are stopped by smp_send_stop(). */
135 case KMSG_DUMP_PANIC
:
136 /* Emergency restart shouldn't be blocked by spin lock. */
137 case KMSG_DUMP_EMERG
:
143 EXPORT_SYMBOL_GPL(pstore_cannot_block_path
);
145 #if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
146 static int zbufsize_deflate(size_t size
)
151 /* buffer range for efivars */
161 /* buffer range for nvram, erst */
170 return (size
* 100) / cmpr
;
174 #if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
175 static int zbufsize_lzo(size_t size
)
177 return lzo1x_worst_compress(size
);
181 #if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS) || IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
182 static int zbufsize_lz4(size_t size
)
184 return LZ4_compressBound(size
);
188 #if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
189 static int zbufsize_842(size_t size
)
195 static const struct pstore_zbackend
*zbackend __ro_after_init
;
197 static const struct pstore_zbackend zbackends
[] = {
198 #if IS_ENABLED(CONFIG_PSTORE_DEFLATE_COMPRESS)
200 .zbufsize
= zbufsize_deflate
,
204 #if IS_ENABLED(CONFIG_PSTORE_LZO_COMPRESS)
206 .zbufsize
= zbufsize_lzo
,
210 #if IS_ENABLED(CONFIG_PSTORE_LZ4_COMPRESS)
212 .zbufsize
= zbufsize_lz4
,
216 #if IS_ENABLED(CONFIG_PSTORE_LZ4HC_COMPRESS)
218 .zbufsize
= zbufsize_lz4
,
222 #if IS_ENABLED(CONFIG_PSTORE_842_COMPRESS)
224 .zbufsize
= zbufsize_842
,
231 static int pstore_compress(const void *in
, void *out
,
232 unsigned int inlen
, unsigned int outlen
)
236 ret
= crypto_comp_compress(tfm
, in
, inlen
, out
, &outlen
);
238 pr_err("crypto_comp_compress failed, ret = %d!\n", ret
);
245 static int pstore_decompress(void *in
, void *out
,
246 unsigned int inlen
, unsigned int outlen
)
250 ret
= crypto_comp_decompress(tfm
, in
, inlen
, out
, &outlen
);
252 pr_err("crypto_comp_decompress failed, ret = %d!\n", ret
);
259 static void allocate_buf_for_compression(void)
261 if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS
) || !zbackend
)
264 if (!crypto_has_comp(zbackend
->name
, 0, 0)) {
265 pr_err("No %s compression\n", zbackend
->name
);
269 big_oops_buf_sz
= zbackend
->zbufsize(psinfo
->bufsize
);
270 if (big_oops_buf_sz
<= 0)
273 big_oops_buf
= kmalloc(big_oops_buf_sz
, GFP_KERNEL
);
275 pr_err("allocate compression buffer error!\n");
279 tfm
= crypto_alloc_comp(zbackend
->name
, 0, 0);
280 if (IS_ERR_OR_NULL(tfm
)) {
283 pr_err("crypto_alloc_comp() failed!\n");
288 static void free_buf_for_compression(void)
290 if (IS_ENABLED(CONFIG_PSTORE_COMPRESS
) && !IS_ERR_OR_NULL(tfm
))
291 crypto_free_comp(tfm
);
298 * Called when compression fails, since the printk buffer
299 * would be fetched for compression calling it again when
300 * compression fails would have moved the iterator of
301 * printk buffer which results in fetching old contents.
302 * Copy the recent messages from big_oops_buf to psinfo->buf
304 static size_t copy_kmsg_to_buffer(int hsize
, size_t len
)
309 total_len
= hsize
+ len
;
311 if (total_len
> psinfo
->bufsize
) {
312 diff
= total_len
- psinfo
->bufsize
+ hsize
;
313 memcpy(psinfo
->buf
, big_oops_buf
, hsize
);
314 memcpy(psinfo
->buf
+ hsize
, big_oops_buf
+ diff
,
315 psinfo
->bufsize
- hsize
);
316 total_len
= psinfo
->bufsize
;
318 memcpy(psinfo
->buf
, big_oops_buf
, total_len
);
323 void pstore_record_init(struct pstore_record
*record
,
324 struct pstore_info
*psinfo
)
326 memset(record
, 0, sizeof(*record
));
328 record
->psi
= psinfo
;
330 /* Report zeroed timestamp if called before timekeeping has resumed. */
331 record
->time
= ns_to_timespec(ktime_get_real_fast_ns());
335 * callback from kmsg_dump. (s2,l2) has the most recently
336 * written bytes, older bytes are in (s1,l1). Save as much
337 * as we can from the end of the buffer.
339 static void pstore_dump(struct kmsg_dumper
*dumper
,
340 enum kmsg_dump_reason reason
)
342 unsigned long total
= 0;
344 unsigned int part
= 1;
345 unsigned long flags
= 0;
349 why
= get_reason_str(reason
);
351 if (pstore_cannot_block_path(reason
)) {
352 is_locked
= spin_trylock_irqsave(&psinfo
->buf_lock
, flags
);
354 pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
355 , in_nmi() ? "NMI" : why
);
359 spin_lock_irqsave(&psinfo
->buf_lock
, flags
);
363 while (total
< kmsg_bytes
) {
369 struct pstore_record record
;
371 pstore_record_init(&record
, psinfo
);
372 record
.type
= PSTORE_TYPE_DMESG
;
373 record
.count
= oopscount
;
374 record
.reason
= reason
;
376 record
.buf
= psinfo
->buf
;
378 if (big_oops_buf
&& is_locked
) {
380 dst_size
= big_oops_buf_sz
;
383 dst_size
= psinfo
->bufsize
;
386 /* Write dump header. */
387 header_size
= snprintf(dst
, dst_size
, "%s#%d Part%u\n", why
,
389 dst_size
-= header_size
;
391 /* Write dump contents. */
392 if (!kmsg_dump_get_buffer(dumper
, true, dst
+ header_size
,
393 dst_size
, &dump_size
))
396 if (big_oops_buf
&& is_locked
) {
397 zipped_len
= pstore_compress(dst
, psinfo
->buf
,
398 header_size
+ dump_size
,
401 if (zipped_len
> 0) {
402 record
.compressed
= true;
403 record
.size
= zipped_len
;
405 record
.size
= copy_kmsg_to_buffer(header_size
,
409 record
.size
= header_size
+ dump_size
;
412 ret
= psinfo
->write(&record
);
413 if (ret
== 0 && reason
== KMSG_DUMP_OOPS
&& pstore_is_mounted())
414 pstore_new_entry
= 1;
416 total
+= record
.size
;
420 spin_unlock_irqrestore(&psinfo
->buf_lock
, flags
);
423 static struct kmsg_dumper pstore_dumper
= {
428 * Register with kmsg_dump to save last part of console log on panic.
430 static void pstore_register_kmsg(void)
432 kmsg_dump_register(&pstore_dumper
);
435 static void pstore_unregister_kmsg(void)
437 kmsg_dump_unregister(&pstore_dumper
);
#ifdef CONFIG_PSTORE_CONSOLE
/*
 * Console callback: persist console output in bufsize-sized chunks.
 * Only trylocks while an oops is in progress, dropping output rather
 * than deadlocking.
 */
static void pstore_console_write(struct console *con, const char *s, unsigned c)
{
	const char *e = s + c;

	while (s < e) {
		struct pstore_record record;
		unsigned long flags;

		pstore_record_init(&record, psinfo);
		record.type = PSTORE_TYPE_CONSOLE;

		/* Never write more than one backend buffer at a time. */
		if (c > psinfo->bufsize)
			c = psinfo->bufsize;

		if (oops_in_progress) {
			if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
				break;
		} else {
			spin_lock_irqsave(&psinfo->buf_lock, flags);
		}
		record.buf = (char *)s;
		record.size = c;
		psinfo->write(&record);
		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
		s += c;
		c = e - s;
	}
}

static struct console pstore_console = {
	.name	= "pstore",
	.write	= pstore_console_write,
	.flags	= CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
	.index	= -1,
};

static void pstore_register_console(void)
{
	register_console(&pstore_console);
}

static void pstore_unregister_console(void)
{
	unregister_console(&pstore_console);
}
#else
static void pstore_register_console(void) {}
static void pstore_unregister_console(void) {}
#endif
491 static int pstore_write_user_compat(struct pstore_record
*record
,
492 const char __user
*buf
)
499 record
->buf
= memdup_user(buf
, record
->size
);
500 if (IS_ERR(record
->buf
)) {
501 ret
= PTR_ERR(record
->buf
);
505 ret
= record
->psi
->write(record
);
511 return unlikely(ret
< 0) ? ret
: record
->size
;
515 * platform specific persistent storage driver registers with
516 * us here. If pstore is already mounted, call the platform
517 * read function right away to populate the file system. If not
518 * then the pstore mount code will call us later to fill out
521 int pstore_register(struct pstore_info
*psi
)
523 struct module
*owner
= psi
->owner
;
525 if (backend
&& strcmp(backend
, psi
->name
)) {
526 pr_warn("ignoring unexpected backend '%s'\n", psi
->name
);
530 /* Sanity check flags. */
532 pr_warn("backend '%s' must support at least one frontend\n",
537 /* Check for required functions. */
538 if (!psi
->read
|| !psi
->write
) {
539 pr_warn("backend '%s' must implement read() and write()\n",
544 spin_lock(&pstore_lock
);
546 pr_warn("backend '%s' already loaded: ignoring '%s'\n",
547 psinfo
->name
, psi
->name
);
548 spin_unlock(&pstore_lock
);
552 if (!psi
->write_user
)
553 psi
->write_user
= pstore_write_user_compat
;
555 mutex_init(&psinfo
->read_mutex
);
556 spin_unlock(&pstore_lock
);
558 if (owner
&& !try_module_get(owner
)) {
563 allocate_buf_for_compression();
565 if (pstore_is_mounted())
566 pstore_get_records(0);
568 if (psi
->flags
& PSTORE_FLAGS_DMESG
)
569 pstore_register_kmsg();
570 if (psi
->flags
& PSTORE_FLAGS_CONSOLE
)
571 pstore_register_console();
572 if (psi
->flags
& PSTORE_FLAGS_FTRACE
)
573 pstore_register_ftrace();
574 if (psi
->flags
& PSTORE_FLAGS_PMSG
)
575 pstore_register_pmsg();
577 /* Start watching for new records, if desired. */
578 if (pstore_update_ms
>= 0) {
579 pstore_timer
.expires
= jiffies
+
580 msecs_to_jiffies(pstore_update_ms
);
581 add_timer(&pstore_timer
);
585 * Update the module parameter backend, so it is visible
586 * through /sys/module/pstore/parameters/backend
590 pr_info("Registered %s as persistent store backend\n", psi
->name
);
596 EXPORT_SYMBOL_GPL(pstore_register
);
598 void pstore_unregister(struct pstore_info
*psi
)
600 /* Stop timer and make sure all work has finished. */
601 pstore_update_ms
= -1;
602 del_timer_sync(&pstore_timer
);
603 flush_work(&pstore_work
);
605 if (psi
->flags
& PSTORE_FLAGS_PMSG
)
606 pstore_unregister_pmsg();
607 if (psi
->flags
& PSTORE_FLAGS_FTRACE
)
608 pstore_unregister_ftrace();
609 if (psi
->flags
& PSTORE_FLAGS_CONSOLE
)
610 pstore_unregister_console();
611 if (psi
->flags
& PSTORE_FLAGS_DMESG
)
612 pstore_unregister_kmsg();
614 free_buf_for_compression();
619 EXPORT_SYMBOL_GPL(pstore_unregister
);
621 static void decompress_record(struct pstore_record
*record
)
626 if (!record
->compressed
)
629 /* Only PSTORE_TYPE_DMESG support compression. */
630 if (record
->type
!= PSTORE_TYPE_DMESG
) {
631 pr_warn("ignored compressed record type %d\n", record
->type
);
635 /* No compression method has created the common buffer. */
637 pr_warn("no decompression buffer allocated\n");
641 unzipped_len
= pstore_decompress(record
->buf
, big_oops_buf
,
642 record
->size
, big_oops_buf_sz
);
643 if (unzipped_len
<= 0) {
644 pr_err("decompression failed: %d\n", unzipped_len
);
648 /* Build new buffer for decompressed contents. */
649 decompressed
= kmalloc(unzipped_len
+ record
->ecc_notice_size
,
652 pr_err("decompression ran out of memory\n");
655 memcpy(decompressed
, big_oops_buf
, unzipped_len
);
657 /* Append ECC notice to decompressed buffer. */
658 memcpy(decompressed
+ unzipped_len
, record
->buf
+ record
->size
,
659 record
->ecc_notice_size
);
661 /* Swap out compresed contents with decompressed contents. */
663 record
->buf
= decompressed
;
664 record
->size
= unzipped_len
;
665 record
->compressed
= false;
669 * Read all the records from one persistent store backend. Create
670 * files in our filesystem. Don't warn about -EEXIST errors
671 * when we are re-scanning the backing store looking to add new
674 void pstore_get_backend_records(struct pstore_info
*psi
,
675 struct dentry
*root
, int quiet
)
678 unsigned int stop_loop
= 65536;
683 mutex_lock(&psi
->read_mutex
);
684 if (psi
->open
&& psi
->open(psi
))
688 * Backend callback read() allocates record.buf. decompress_record()
689 * may reallocate record.buf. On success, pstore_mkfile() will keep
690 * the record.buf, so free it only on failure.
692 for (; stop_loop
; stop_loop
--) {
693 struct pstore_record
*record
;
696 record
= kzalloc(sizeof(*record
), GFP_KERNEL
);
698 pr_err("out of memory creating record\n");
701 pstore_record_init(record
, psi
);
703 record
->size
= psi
->read(record
);
705 /* No more records left in backend? */
706 if (record
->size
<= 0) {
711 decompress_record(record
);
712 rc
= pstore_mkfile(root
, record
);
714 /* pstore_mkfile() did not take record, so free it. */
717 if (rc
!= -EEXIST
|| !quiet
)
724 mutex_unlock(&psi
->read_mutex
);
727 pr_warn("failed to create %d record(s) from '%s'\n",
730 pr_err("looping? Too many records seen from '%s'\n",
/* Workqueue body: re-scan the backend, quietly ignoring -EEXIST. */
static void pstore_dowork(struct work_struct *work)
{
	pstore_get_records(1);
}
739 static void pstore_timefunc(struct timer_list
*unused
)
741 if (pstore_new_entry
) {
742 pstore_new_entry
= 0;
743 schedule_work(&pstore_work
);
746 if (pstore_update_ms
>= 0)
747 mod_timer(&pstore_timer
,
748 jiffies
+ msecs_to_jiffies(pstore_update_ms
));
751 void __init
pstore_choose_compression(void)
753 const struct pstore_zbackend
*step
;
758 for (step
= zbackends
; step
->name
; step
++) {
759 if (!strcmp(compress
, step
->name
)) {
761 pr_info("using %s compression\n", zbackend
->name
);
/* Read-only module parameters, visible under /sys/module/pstore/parameters/. */
module_param(compress, charp, 0444);
MODULE_PARM_DESC(compress, "Pstore compression to use");

module_param(backend, charp, 0444);
MODULE_PARM_DESC(backend, "Pstore backend to use");