/* arch/x86/kernel/crash_dump_64.c */
// SPDX-License-Identifier: GPL-2.0
/*
 *	Memory preserving reboot related code.
 *
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/io.h>
/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 *
 * Returns the number of bytes copied, 0 for a zero-length request,
 * -ENOMEM if the old page could not be mapped, or -EFAULT if the
 * copy to user space faulted.
 */
27 ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
28 size_t csize, unsigned long offset, int userbuf)
30 void *vaddr;
32 if (!csize)
33 return 0;
35 vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE);
36 if (!vaddr)
37 return -ENOMEM;
39 if (userbuf) {
40 if (copy_to_user(buf, vaddr + offset, csize)) {
41 iounmap(vaddr);
42 return -EFAULT;
44 } else
45 memcpy(buf, vaddr + offset, csize);
47 set_iounmap_nonlazy();
48 iounmap(vaddr);
49 return csize;