// SPDX-License-Identifier: GPL-2.0
/*
 *	Memory preserving reboot related code.
 *
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 */
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
14 static ssize_t
__copy_oldmem_page(unsigned long pfn
, char *buf
, size_t csize
,
15 unsigned long offset
, int userbuf
,
24 vaddr
= (__force
void *)ioremap_encrypted(pfn
<< PAGE_SHIFT
, PAGE_SIZE
);
26 vaddr
= (__force
void *)ioremap_cache(pfn
<< PAGE_SHIFT
, PAGE_SIZE
);
32 if (copy_to_user((void __user
*)buf
, vaddr
+ offset
, csize
)) {
33 iounmap((void __iomem
*)vaddr
);
37 memcpy(buf
, vaddr
+ offset
, csize
);
39 set_iounmap_nonlazy();
40 iounmap((void __iomem
*)vaddr
);
45 * copy_oldmem_page - copy one page of memory
46 * @pfn: page frame number to be copied
47 * @buf: target memory address for the copy; this can be in kernel address
48 * space or user address space (see @userbuf)
49 * @csize: number of bytes to copy
50 * @offset: offset in bytes into the page (based on pfn) to begin the copy
51 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
52 * otherwise @buf is in kernel address space, use memcpy().
54 * Copy a page from the old kernel's memory. For this page, there is no pte
55 * mapped in the current kernel. We stitch up a pte, similar to kmap_atomic.
57 ssize_t
copy_oldmem_page(unsigned long pfn
, char *buf
, size_t csize
,
58 unsigned long offset
, int userbuf
)
60 return __copy_oldmem_page(pfn
, buf
, csize
, offset
, userbuf
, false);
64 * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
65 * memory with the encryption mask set to accomodate kdump on SME-enabled
68 ssize_t
copy_oldmem_page_encrypted(unsigned long pfn
, char *buf
, size_t csize
,
69 unsigned long offset
, int userbuf
)
71 return __copy_oldmem_page(pfn
, buf
, csize
, offset
, userbuf
, true);