treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / arch / mips / kernel / crash_dump.c
blob01b2bd95ba1f7cad0137f787cd29b977f85bcdf4
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/highmem.h>
3 #include <linux/memblock.h>
4 #include <linux/crash_dump.h>
5 #include <linux/uaccess.h>
6 #include <linux/slab.h>
/*
 * Bounce page for the user-copy path of copy_oldmem_page(): data is first
 * copied here (in atomic context) and then pushed to user space.
 * Allocated once at boot by kdump_buf_page_init().
 */
static void *kdump_buf_page;
10 /**
11 * copy_oldmem_page - copy one page from "oldmem"
12 * @pfn: page frame number to be copied
13 * @buf: target memory address for the copy; this can be in kernel address
14 * space or user address space (see @userbuf)
15 * @csize: number of bytes to copy
16 * @offset: offset in bytes into the page (based on pfn) to begin the copy
17 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
18 * otherwise @buf is in kernel address space, use memcpy().
20 * Copy a page from "oldmem". For this page, there is no pte mapped
21 * in the current kernel.
23 * Calling copy_to_user() in atomic context is not desirable. Hence first
24 * copying the data to a pre-allocated kernel page and then copying to user
25 * space in non-atomic context.
27 ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
28 size_t csize, unsigned long offset, int userbuf)
30 void *vaddr;
32 if (!csize)
33 return 0;
35 vaddr = kmap_atomic_pfn(pfn);
37 if (!userbuf) {
38 memcpy(buf, (vaddr + offset), csize);
39 kunmap_atomic(vaddr);
40 } else {
41 if (!kdump_buf_page) {
42 pr_warn("Kdump: Kdump buffer page not allocated\n");
44 return -EFAULT;
46 copy_page(kdump_buf_page, vaddr);
47 kunmap_atomic(vaddr);
48 if (copy_to_user(buf, (kdump_buf_page + offset), csize))
49 return -EFAULT;
52 return csize;
55 static int __init kdump_buf_page_init(void)
57 int ret = 0;
59 kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
60 if (!kdump_buf_page) {
61 pr_warn("Kdump: Failed to allocate kdump buffer page\n");
62 ret = -ENOMEM;
65 return ret;
67 arch_initcall(kdump_buf_page_init);