arch/sparc/mm/highmem.c (linux/fpc-iii.git, blob d4a80adea7e59b1fb6469ac16d110a3589a4bb53)
// SPDX-License-Identifier: GPL-2.0
/*
 *  highmem.c: virtual kernel memory mappings for high memory
 *
 *  Provides kernel-static versions of atomic kmap functions originally
 *  found as inlines in include/asm-sparc/highmem.h.  These became
 *  needed as kmap_atomic() and kunmap_atomic() started getting
 *  called from within modules.
 *  -- Tomas Szepe <szepe@pinerecords.com>, September 2002
 *
 *  But kmap_atomic() and kunmap_atomic() cannot be inlined in
 *  modules because they are loaded with btfixup-ped functions.
 *
 *  The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
 *  gives a more generic (and caching) interface.  But kmap_atomic can
 *  be used in IRQ contexts, so in some (very limited) cases we need it.
 *
 *  XXX This is an old text. Actually, it's good to use atomic kmaps,
 *  provided you remember that they are atomic and do not try to sleep
 *  with a kmap taken, much like a spinlock.  Non-atomic kmaps are
 *  shared by CPUs, and so precious, and establishing them requires IPI.
 *  Atomic kmaps are lightweight and we may have NCPUS more of them.
 */
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/vaddrs.h>
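
/*
 * Usage sketch (not part of this file): the pattern described by the header
 * comment above.  A caller maps a possibly-highmem page, touches it without
 * sleeping, and unmaps it again.  The helper name and arguments are
 * hypothetical; kunmap_atomic() is the generic wrapper around
 * __kunmap_atomic() provided by <linux/highmem.h>.
 */
#if 0
static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap_atomic(page);	/* may use a per-CPU fixmap slot */

	memcpy(dst, vaddr, len);		/* no sleeping while the kmap is held */
	kunmap_atomic(vaddr);			/* releases the fixmap slot, if any */
}
#endif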
/* Protection bits used for atomic kmap ptes, set up in kmap_init() */
pgprot_t kmap_prot;

/* Cached pte of the first kmap fixmap slot (FIX_KMAP_BEGIN) */
static pte_t *kmap_pte;
void __init kmap_init(void)
{
	unsigned long address;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *dir;

	address = __fix_to_virt(FIX_KMAP_BEGIN);
	p4d = p4d_offset(pgd_offset_k(address), address);
	pud = pud_offset(p4d, address);
	dir = pmd_offset(pud, address);

	/* cache the first kmap pte */
	kmap_pte = pte_offset_kernel(dir, address);

	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
}
void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	long idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);	/* lowmem pages are permanently mapped */

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();	/* each CPU gets its own block of KM_TYPE_NR slots */
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

/* XXX Fix - Anton */
#if 0
	__flush_cache_one(vaddr);
#else
	flush_cache_all();
#endif

#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte-idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
	__flush_tlb_one(vaddr);
#else
	flush_tlb_all();
#endif

	return (void*) vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
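
/*
 * Nesting sketch (not part of this file): kmap_atomic() sections may nest,
 * e.g. when an interrupt handler takes its own atomic kmap while the
 * interrupted code already holds one.  Each level consumes one of the
 * per-CPU slots handed out by kmap_atomic_idx_push() above and must be
 * released in reverse (LIFO) order.  Names below are hypothetical.
 */
#if 0
static void example_nested_kmaps(struct page *a, struct page *b)
{
	void *va = kmap_atomic(a);	/* first slot on this CPU */
	void *vb = kmap_atomic(b);	/* next slot on this CPU */

	/* ... access *va and *vb, no sleeping ... */

	kunmap_atomic(vb);		/* unmap in reverse order */
	kunmap_atomic(va);
}
#endif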
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type;

	/* Addresses below the fixmap came from page_address() on lowmem pages */
	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();

#ifdef CONFIG_DEBUG_HIGHMEM
	{
		unsigned long idx;

		idx = type + KM_TYPE_NR * smp_processor_id();
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));

		/* XXX Fix - Anton */
#if 0
		__flush_cache_one(vaddr);
#else
		flush_cache_all();
#endif

		/*
		 * force other mappings to Oops if they'll try to access
		 * this pte without first remapping it
		 */
		pte_clear(&init_mm, vaddr, kmap_pte-idx);
		/* XXX Fix - Anton */
#if 0
		__flush_tlb_one(vaddr);
#else
		flush_tlb_all();
#endif
	}
#endif

	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);