Linux 5.7.6
[linux/fpc-iii.git] / arch / csky / mm / highmem.c
blob 813129145f3da77c87a516f2f33bf48d8592ed18
1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
4 #include <linux/module.h>
5 #include <linux/highmem.h>
6 #include <linux/smp.h>
7 #include <linux/memblock.h>
8 #include <asm/fixmap.h>
9 #include <asm/tlbflush.h>
10 #include <asm/cacheflush.h>
/* Kernel PTE backing the first fixmap kmap slot; initialized in kmap_init(). */
static pte_t *kmap_pte;

/*
 * PFN bounds of the highmem zone.  Defined here but assigned elsewhere
 * (presumably by the arch memory-init code) — not written in this file.
 */
unsigned long highstart_pfn, highend_pfn;
/*
 * kmap - create a sleeping (long-lived) kernel mapping for a page.
 *
 * Lowmem pages already have a permanent mapping, so they are returned
 * directly; highmem pages go through kmap_high() and the new mapping's
 * TLB entry is flushed before the address is handed back.
 */
void *kmap(struct page *page)
{
	void *vaddr;

	might_sleep();

	/* Permanently mapped lowmem page: nothing to set up. */
	if (!PageHighMem(page))
		return page_address(page);

	vaddr = kmap_high(page);
	flush_tlb_one((unsigned long)vaddr);
	return vaddr;
}
EXPORT_SYMBOL(kmap);
/*
 * kunmap - release a mapping created by kmap().
 *
 * Must not be called from interrupt context.  Lowmem pages were never
 * specially mapped, so only highmem pages need kunmap_high().
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());

	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
/*
 * kmap_atomic - atomically map a highmem page into a per-CPU fixmap slot.
 *
 * Disables preemption and pagefaults before anything else so the CPU
 * (and hence the per-CPU fixmap slot) cannot change under us; the pair
 * is undone by __kunmap_atomic().  Lowmem pages are returned via their
 * permanent mapping without consuming a fixmap slot.
 */
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	/* Claim the next atomic-kmap slot for this CPU. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/* The slot must be free: its previous user cleared the PTE. */
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	/* Kill any stale TLB entry for this fixmap address. */
	flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
/*
 * __kunmap_atomic - undo kmap_atomic() / kmap_atomic_pfn().
 *
 * Addresses below FIXADDR_START are permanent lowmem mappings and need
 * no teardown; otherwise the slot index is popped.  Re-enables
 * pagefaults and preemption on every path, mirroring kmap_atomic().
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx;

	if (vaddr < FIXADDR_START)
		goto out;

#ifdef CONFIG_DEBUG_HIGHMEM
	idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();

	/* Catch callers unmapping a slot they do not own. */
	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/* Clear the PTE and its TLB entry so reuse can be detected. */
	pte_clear(&init_mm, vaddr, kmap_pte - idx);
	flush_tlb_one(vaddr);
#else
	(void) idx; /* to kill a warning */
#endif
	kmap_atomic_idx_pop();
out:
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
88 * This is the same as kmap_atomic() but can map memory that doesn't
89 * have a struct page associated with it.
91 void *kmap_atomic_pfn(unsigned long pfn)
93 unsigned long vaddr;
94 int idx, type;
96 pagefault_disable();
98 type = kmap_atomic_idx_push();
99 idx = type + KM_TYPE_NR*smp_processor_id();
100 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
101 set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
102 flush_tlb_one(vaddr);
104 return (void *) vaddr;
107 struct page *kmap_atomic_to_page(void *ptr)
109 unsigned long idx, vaddr = (unsigned long)ptr;
110 pte_t *pte;
112 if (vaddr < FIXADDR_START)
113 return virt_to_page(ptr);
115 idx = virt_to_fix(vaddr);
116 pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
117 return pte_page(*pte);
/*
 * kmap_pages_init - set up the page tables for the persistent-kmap
 * (PKMAP) area and record its PTE array in pkmap_page_table.
 */
static void __init kmap_pages_init(void)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;

	/* Allocate page tables covering the whole PKMAP window. */
	vaddr = PKMAP_BASE;
	fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

	/* Walk down to the PTE level for the start of the window. */
	pgd = swapper_pg_dir + __pgd_offset(vaddr);
	pud = (pud_t *)pgd;
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}
/*
 * kmap_init - arch highmem initialization: set up the PKMAP area and
 * cache the kernel PTE of the first fixmap kmap slot in kmap_pte,
 * which kmap_atomic()/kmap_atomic_pfn() index from.
 */
void __init kmap_init(void)
{
	unsigned long vaddr;

	kmap_pages_init();

	vaddr = __fix_to_virt(FIX_KMAP_BEGIN);

	kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
}